diff --git a/go.mod b/go.mod
index 2371236fe..503e93c38 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
 module github.com/openshift/hypershift-oadp-plugin
 
-go 1.25.3
+go 1.25.7
 
 toolchain go1.25.8
 
@@ -26,7 +26,6 @@ require (
 	github.com/google/gnostic-models v0.7.0 // indirect
 	github.com/google/go-cmp v0.7.0 // indirect
 	github.com/google/uuid v1.6.0 // indirect
-	github.com/kubernetes-csi/external-snapshotter/client/v7 v7.0.0 // indirect
 	github.com/openshift/api v0.0.0-20260120150926-4c643a652d54 // indirect
 	github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
@@ -77,8 +76,8 @@ require (
 	golang.org/x/term v0.41.0 // indirect
 	golang.org/x/text v0.35.0 // indirect
 	golang.org/x/time v0.14.0 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a // indirect
-	google.golang.org/grpc v1.72.2 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 // indirect
+	google.golang.org/grpc v1.77.0 // indirect
 	google.golang.org/protobuf v1.36.11 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
@@ -91,4 +90,4 @@ require (
 	sigs.k8s.io/yaml v1.6.0 // indirect
 )
 
-replace github.com/vmware-tanzu/velero => github.com/openshift/velero v0.10.2-0.20250514165055-8fbcf3a8da11
+replace github.com/vmware-tanzu/velero => github.com/openshift/velero v0.10.2-0.20260323170432-5ef912f438f6
diff --git a/go.sum b/go.sum
index adc63e768..c8c5317fe 100644
--- a/go.sum
+++ b/go.sum
@@ -138,8 +138,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
-github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
+github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
+github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
@@ -148,8 +148,6 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/kubernetes-csi/external-snapshotter/client/v7 v7.0.0 h1:j3YK74myEQRxR/srciTpOrm221SAvz6J5OVWbyfeXFo=
-github.com/kubernetes-csi/external-snapshotter/client/v7 v7.0.0/go.mod h1:FlyYFe32mPxKEPaRXKNxfX576d1AoCzstYDoOOnyMA4=
 github.com/kubernetes-csi/external-snapshotter/client/v8 v8.4.0 h1:bMqrb3UHgHbP+PW9VwiejfDJU1R0PpXVZNMdeH8WYKI=
 github.com/kubernetes-csi/external-snapshotter/client/v8 v8.4.0/go.mod h1:E3vdYxHj2C2q6qo8/Da4g7P+IcwqRZyy3gJBzYybV9Y=
 github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
@@ -211,8 +209,8 @@ github.com/openshift/hive/apis v0.0.0-20241220022629-3f49f26197ff h1:6C1z4xMAruy
 github.com/openshift/hive/apis v0.0.0-20241220022629-3f49f26197ff/go.mod h1:1vBNCcWNpQyFCz83PWYT/lHUFJ9ost2t5FijHElh6gQ=
 github.com/openshift/hypershift/api v0.0.0-20260317154635-8eaac177f1b0 h1:x5DgHyFXF9zpgTH4JYDpBhQnASEIj6z1WXdeHl1DGAI=
 github.com/openshift/hypershift/api v0.0.0-20260317154635-8eaac177f1b0/go.mod h1:eYDwJzXCU+0HO9DdvhBAA143z7woIJ5dV71TaJXIkgk=
-github.com/openshift/velero v0.10.2-0.20250514165055-8fbcf3a8da11 h1:/BjkW8HljIX96clCuv/V+PzShD3coVcNilCXd/Axlfo=
-github.com/openshift/velero v0.10.2-0.20250514165055-8fbcf3a8da11/go.mod h1:+wInt9pLqlRiUZAWsO5eSxLZK9Q3jSV9MFaUrBfvGN0=
+github.com/openshift/velero v0.10.2-0.20260323170432-5ef912f438f6 h1:nWv9+tEi34VMFMOziVZLOKvn3yXDqk68ODXlzpYU2S0=
+github.com/openshift/velero v0.10.2-0.20260323170432-5ef912f438f6/go.mod h1:YKHPM2FpS+5WrcciMi5j4bo/JMnXXrR1M+j1DoTA26U=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -259,18 +257,18 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
 github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
-go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
-go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
-go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
-go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
-go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
-go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
-go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
-go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk=
-go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w=
-go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
-go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
+go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
+go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
+go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms=
+go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g=
+go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g=
+go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc=
+go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8=
+go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE=
+go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw=
+go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg=
+go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw=
+go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
 go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
 go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
@@ -388,19 +386,21 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0=
 gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
+gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
+gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
 google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a h1:v2PbRU4K3llS09c7zodFpNePeamkAwG3mPrAery9VeE=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 h1:tRPGkdGHuewF4UisLzzHHr1spKw92qLM98nIzxbC0wY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
 google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.72.2 h1:TdbGzwb82ty4OusHWepvFWGLgIbNo1/SUynEN0ssqv8=
-google.golang.org/grpc v1.72.2/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
+google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM=
+google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v7/LICENSE b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v7/LICENSE
deleted file mode 100644
index 261eeb9e9..000000000
--- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v7/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1/types.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1/types.go
deleted file mode 100644
index fdb867cdb..000000000
--- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1/types.go
+++ /dev/null
@@ -1,456 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// +kubebuilder:object:generate=true
-package v1
-
-import (
-	core_v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// +genclient
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// VolumeSnapshot is a user's request for either creating a point-in-time
-// snapshot of a persistent volume, or binding to a pre-existing snapshot.
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:scope=Namespaced,shortName=vs
-// +kubebuilder:subresource:status
-// +kubebuilder:printcolumn:name="ReadyToUse",type=boolean,JSONPath=`.status.readyToUse`,description="Indicates if the snapshot is ready to be used to restore a volume."
-// +kubebuilder:printcolumn:name="SourcePVC",type=string,JSONPath=`.spec.source.persistentVolumeClaimName`,description="If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created."
-// +kubebuilder:printcolumn:name="SourceSnapshotContent",type=string,JSONPath=`.spec.source.volumeSnapshotContentName`,description="If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot."
-// +kubebuilder:printcolumn:name="RestoreSize",type=string,JSONPath=`.status.restoreSize`,description="Represents the minimum size of volume required to rehydrate from this snapshot."
-// +kubebuilder:printcolumn:name="SnapshotClass",type=string,JSONPath=`.spec.volumeSnapshotClassName`,description="The name of the VolumeSnapshotClass requested by the VolumeSnapshot."
-// +kubebuilder:printcolumn:name="SnapshotContent",type=string,JSONPath=`.status.boundVolumeSnapshotContentName`,description="Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind to. Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object."
-// +kubebuilder:printcolumn:name="CreationTime",type=date,JSONPath=`.status.creationTime`,description="Timestamp when the point-in-time snapshot was taken by the underlying storage system."
-// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`
-type VolumeSnapshot struct {
-	metav1.TypeMeta `json:",inline"`
-	// Standard object's metadata.
-	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
-	// +optional
-	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
-
-	// spec defines the desired characteristics of a snapshot requested by a user.
-	// More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots
-	// Required.
-	Spec VolumeSnapshotSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
-
-	// status represents the current information of a snapshot.
-	// Consumers must verify binding between VolumeSnapshot and
-	// VolumeSnapshotContent objects is successful (by validating that both
-	// VolumeSnapshot and VolumeSnapshotContent point at each other) before
-	// using this object.
-	// +optional
-	Status *VolumeSnapshotStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// VolumeSnapshotList is a list of VolumeSnapshot objects
-type VolumeSnapshotList struct {
-	metav1.TypeMeta `json:",inline"`
-	// +optional
-	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
-
-	// List of VolumeSnapshots
-	Items []VolumeSnapshot `json:"items" protobuf:"bytes,2,rep,name=items"`
-}
-
-// VolumeSnapshotSpec describes the common attributes of a volume snapshot.
-type VolumeSnapshotSpec struct {
-	// source specifies where a snapshot will be created from.
-	// This field is immutable after creation.
-	// Required.
-	Source VolumeSnapshotSource `json:"source" protobuf:"bytes,1,opt,name=source"`
-
-	// VolumeSnapshotClassName is the name of the VolumeSnapshotClass
-	// requested by the VolumeSnapshot.
-	// VolumeSnapshotClassName may be left nil to indicate that the default
-	// SnapshotClass should be used.
-	// A given cluster may have multiple default Volume SnapshotClasses: one
-	// default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass,
-	// VolumeSnapshotSource will be checked to figure out what the associated
-	// CSI Driver is, and the default VolumeSnapshotClass associated with that
-	// CSI Driver will be used. If more than one VolumeSnapshotClass exist for
-	// a given CSI Driver and more than one have been marked as default,
-	// CreateSnapshot will fail and generate an event.
-	// Empty string is not allowed for this field.
-	// +optional
-	VolumeSnapshotClassName *string `json:"volumeSnapshotClassName,omitempty" protobuf:"bytes,2,opt,name=volumeSnapshotClassName"`
-}
-
-// VolumeSnapshotSource specifies whether the underlying snapshot should be
-// dynamically taken upon creation or if a pre-existing VolumeSnapshotContent
-// object should be used.
-// Exactly one of its members must be set.
-// Members in VolumeSnapshotSource are immutable.
-type VolumeSnapshotSource struct {
-	// persistentVolumeClaimName specifies the name of the PersistentVolumeClaim
-	// object representing the volume from which a snapshot should be created.
-	// This PVC is assumed to be in the same namespace as the VolumeSnapshot
-	// object.
-	// This field should be set if the snapshot does not exists, and needs to be
-	// created.
-	// This field is immutable.
-	// +optional
-	PersistentVolumeClaimName *string `json:"persistentVolumeClaimName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeClaimName"`
-
-	// volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent
-	// object representing an existing volume snapshot.
-	// This field should be set if the snapshot already exists and only needs a representation in Kubernetes.
-	// This field is immutable.
-	// +optional
-	VolumeSnapshotContentName *string `json:"volumeSnapshotContentName,omitempty" protobuf:"bytes,2,opt,name=volumeSnapshotContentName"`
-}
-
-// VolumeSnapshotStatus is the status of the VolumeSnapshot
-// Note that CreationTime, RestoreSize, ReadyToUse, and Error are in both
-// VolumeSnapshotStatus and VolumeSnapshotContentStatus. Fields in VolumeSnapshotStatus
-// are updated based on fields in VolumeSnapshotContentStatus. They are eventual
-// consistency. These fields are duplicate in both objects due to the following reasons:
-//   - Fields in VolumeSnapshotContentStatus can be used for filtering when importing a
-//     volumesnapshot.
-//   - VolumsnapshotStatus is used by end users because they cannot see VolumeSnapshotContent.
-//   - CSI snapshotter sidecar is light weight as it only watches VolumeSnapshotContent
-//     object, not VolumeSnapshot object.
-type VolumeSnapshotStatus struct {
-	// boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent
-	// object to which this VolumeSnapshot object intends to bind to.
-	// If not specified, it indicates that the VolumeSnapshot object has not been
-	// successfully bound to a VolumeSnapshotContent object yet.
-	// NOTE: To avoid possible security issues, consumers must verify binding between
-	// VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that
-	// both VolumeSnapshot and VolumeSnapshotContent point at each other) before using
-	// this object.
-	// +optional
-	BoundVolumeSnapshotContentName *string `json:"boundVolumeSnapshotContentName,omitempty" protobuf:"bytes,1,opt,name=boundVolumeSnapshotContentName"`
-
-	// creationTime is the timestamp when the point-in-time snapshot is taken
-	// by the underlying storage system.
-	// In dynamic snapshot creation case, this field will be filled in by the
-	// snapshot controller with the "creation_time" value returned from CSI
-	// "CreateSnapshot" gRPC call.
-	// For a pre-existing snapshot, this field will be filled with the "creation_time"
-	// value returned from the CSI "ListSnapshots" gRPC call if the driver supports it.
-	// If not specified, it may indicate that the creation time of the snapshot is unknown.
-	// +optional
-	CreationTime *metav1.Time `json:"creationTime,omitempty" protobuf:"bytes,2,opt,name=creationTime"`
-
-	// readyToUse indicates if the snapshot is ready to be used to restore a volume.
-	// In dynamic snapshot creation case, this field will be filled in by the
-	// snapshot controller with the "ready_to_use" value returned from CSI
-	// "CreateSnapshot" gRPC call.
-	// For a pre-existing snapshot, this field will be filled with the "ready_to_use"
-	// value returned from the CSI "ListSnapshots" gRPC call if the driver supports it,
-	// otherwise, this field will be set to "True".
-	// If not specified, it means the readiness of a snapshot is unknown.
-	// +optional
-	ReadyToUse *bool `json:"readyToUse,omitempty" protobuf:"varint,3,opt,name=readyToUse"`
-
-	// restoreSize represents the minimum size of volume required to create a volume
-	// from this snapshot.
-	// In dynamic snapshot creation case, this field will be filled in by the
-	// snapshot controller with the "size_bytes" value returned from CSI
-	// "CreateSnapshot" gRPC call.
-	// For a pre-existing snapshot, this field will be filled with the "size_bytes"
-	// value returned from the CSI "ListSnapshots" gRPC call if the driver supports it.
-	// When restoring a volume from this snapshot, the size of the volume MUST NOT
-	// be smaller than the restoreSize if it is specified, otherwise the restoration will fail.
-	// If not specified, it indicates that the size is unknown.
-	// +optional
-	RestoreSize *resource.Quantity `json:"restoreSize,omitempty" protobuf:"bytes,4,opt,name=restoreSize"`
-
-	// error is the last observed error during snapshot creation, if any.
-	// This field could be helpful to upper level controllers(i.e., application controller)
-	// to decide whether they should continue on waiting for the snapshot to be created
-	// based on the type of error reported.
-	// The snapshot controller will keep retrying when an error occurs during the
-	// snapshot creation. Upon success, this error field will be cleared.
-	// +optional
-	Error *VolumeSnapshotError `json:"error,omitempty" protobuf:"bytes,5,opt,name=error,casttype=VolumeSnapshotError"`
-
-	// VolumeGroupSnapshotName is the name of the VolumeGroupSnapshot of which this
-	// VolumeSnapshot is a part of.
-	// +optional
-	VolumeGroupSnapshotName *string `json:"volumeGroupSnapshotName,omitempty" protobuf:"bytes,6,opt,name=volumeGroupSnapshotName"`
-}
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// VolumeSnapshotClass specifies parameters that a underlying storage system uses when
-// creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its
-// name in a VolumeSnapshot object.
-// VolumeSnapshotClasses are non-namespaced
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:scope=Cluster,shortName=vsclass;vsclasses
-// +kubebuilder:printcolumn:name="Driver",type=string,JSONPath=`.driver`
-// +kubebuilder:printcolumn:name="DeletionPolicy",type=string,JSONPath=`.deletionPolicy`,description="Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted."
-// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`
-type VolumeSnapshotClass struct {
-	metav1.TypeMeta `json:",inline"`
-	// Standard object's metadata.
-	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
-	// +optional
-	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
-
-	// driver is the name of the storage driver that handles this VolumeSnapshotClass.
-	// Required.
-	Driver string `json:"driver" protobuf:"bytes,2,opt,name=driver"`
-
-	// parameters is a key-value map with storage driver specific parameters for creating snapshots.
-	// These values are opaque to Kubernetes.
-	// +optional
-	Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"`
-
-	// deletionPolicy determines whether a VolumeSnapshotContent created through
-	// the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted.
-	// Supported values are "Retain" and "Delete".
-	// "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept.
-	// "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted.
-	// Required.
-	DeletionPolicy DeletionPolicy `json:"deletionPolicy" protobuf:"bytes,4,opt,name=deletionPolicy"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// VolumeSnapshotClassList is a collection of VolumeSnapshotClasses.
-// +kubebuilder:object:root=true
-type VolumeSnapshotClassList struct {
-	metav1.TypeMeta `json:",inline"`
-	// Standard list metadata
-	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
-	// +optional
-	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
-
-	// items is the list of VolumeSnapshotClasses
-	Items []VolumeSnapshotClass `json:"items" protobuf:"bytes,2,rep,name=items"`
-}
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// VolumeSnapshotContent represents the actual "on-disk" snapshot object in the
-// underlying storage system
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:scope=Cluster,shortName=vsc;vscs
-// +kubebuilder:subresource:status
-// +kubebuilder:printcolumn:name="ReadyToUse",type=boolean,JSONPath=`.status.readyToUse`,description="Indicates if the snapshot is ready to be used to restore a volume."
-// +kubebuilder:printcolumn:name="RestoreSize",type=integer,JSONPath=`.status.restoreSize`,description="Represents the complete size of the snapshot in bytes"
-// +kubebuilder:printcolumn:name="DeletionPolicy",type=string,JSONPath=`.spec.deletionPolicy`,description="Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted."
-// +kubebuilder:printcolumn:name="Driver",type=string,JSONPath=`.spec.driver`,description="Name of the CSI driver used to create the physical snapshot on the underlying storage system."
-// +kubebuilder:printcolumn:name="VolumeSnapshotClass",type=string,JSONPath=`.spec.volumeSnapshotClassName`,description="Name of the VolumeSnapshotClass to which this snapshot belongs."
-// +kubebuilder:printcolumn:name="VolumeSnapshot",type=string,JSONPath=`.spec.volumeSnapshotRef.name`,description="Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound."
-// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`
-type VolumeSnapshotContent struct {
-	metav1.TypeMeta `json:",inline"`
-	// Standard object's metadata.
-	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
-	// +optional
-	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
-
-	// spec defines properties of a VolumeSnapshotContent created by the underlying storage system.
-	// Required.
-	Spec VolumeSnapshotContentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
-
-	// status represents the current information of a snapshot.
-	// +optional
-	Status *VolumeSnapshotContentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// VolumeSnapshotContentList is a list of VolumeSnapshotContent objects
-// +kubebuilder:object:root=true
-type VolumeSnapshotContentList struct {
-	metav1.TypeMeta `json:",inline"`
-	// +optional
-	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
-
-	// items is the list of VolumeSnapshotContents
-	Items []VolumeSnapshotContent `json:"items" protobuf:"bytes,2,rep,name=items"`
-}
-
-// VolumeSnapshotContentSpec is the specification of a VolumeSnapshotContent
-type VolumeSnapshotContentSpec struct {
-	// volumeSnapshotRef specifies the VolumeSnapshot object to which this
-	// VolumeSnapshotContent object is bound.
-	// VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference to
-	// this VolumeSnapshotContent's name for the bidirectional binding to be valid.
-	// For a pre-existing VolumeSnapshotContent object, name and namespace of the
-	// VolumeSnapshot object MUST be provided for binding to happen.
-	// This field is immutable after creation.
-	// Required.
-	VolumeSnapshotRef core_v1.ObjectReference `json:"volumeSnapshotRef" protobuf:"bytes,1,opt,name=volumeSnapshotRef"`
-
-	// deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on
-	// the underlying storage system should be deleted when its bound VolumeSnapshot is deleted.
-	// Supported values are "Retain" and "Delete".
-	// "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept.
-	// "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted.
-	// For dynamically provisioned snapshots, this field will automatically be filled in by the
-	// CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding
-	// VolumeSnapshotClass.
-	// For pre-existing snapshots, users MUST specify this field when creating the
-	// VolumeSnapshotContent object.
-	// Required.
-	DeletionPolicy DeletionPolicy `json:"deletionPolicy" protobuf:"bytes,2,opt,name=deletionPolicy"`
-
-	// driver is the name of the CSI driver used to create the physical snapshot on
-	// the underlying storage system.
-	// This MUST be the same as the name returned by the CSI GetPluginName() call for
-	// that driver.
-	// Required.
-	Driver string `json:"driver" protobuf:"bytes,3,opt,name=driver"`
-
-	// name of the VolumeSnapshotClass from which this snapshot was (or will be)
-	// created.
-	// Note that after provisioning, the VolumeSnapshotClass may be deleted or
-	// recreated with different set of values, and as such, should not be referenced
-	// post-snapshot creation.
-	// +optional
-	VolumeSnapshotClassName *string `json:"volumeSnapshotClassName,omitempty" protobuf:"bytes,4,opt,name=volumeSnapshotClassName"`
-
-	// source specifies whether the snapshot is (or should be) dynamically provisioned
-	// or already exists, and just requires a Kubernetes object representation.
-	// This field is immutable after creation.
-	// Required.
-	Source VolumeSnapshotContentSource `json:"source" protobuf:"bytes,5,opt,name=source"`
-
-	// SourceVolumeMode is the mode of the volume whose snapshot is taken.
-	// Can be either “Filesystem” or “Block”.
-	// If not specified, it indicates the source volume's mode is unknown.
-	// This field is immutable.
-	// This field is an alpha field.
-	// +optional
-	SourceVolumeMode *core_v1.PersistentVolumeMode `json:"sourceVolumeMode" protobuf:"bytes,6,opt,name=sourceVolumeMode"`
-}
-
-// VolumeSnapshotContentSource represents the CSI source of a snapshot.
-// Exactly one of its members must be set.
-// Members in VolumeSnapshotContentSource are immutable.
-type VolumeSnapshotContentSource struct {
-	// volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot
-	// should be dynamically taken from.
-	// This field is immutable.
-	// +optional
-	VolumeHandle *string `json:"volumeHandle,omitempty" protobuf:"bytes,1,opt,name=volumeHandle"`
-
-	// snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on
-	// the underlying storage system for which a Kubernetes object representation
-	// was (or should be) created.
-	// This field is immutable.
-	// +optional
-	SnapshotHandle *string `json:"snapshotHandle,omitempty" protobuf:"bytes,2,opt,name=snapshotHandle"`
-}
-
-// VolumeSnapshotContentStatus is the status of a VolumeSnapshotContent object
-// Note that CreationTime, RestoreSize, ReadyToUse, and Error are in both
-// VolumeSnapshotStatus and VolumeSnapshotContentStatus. Fields in VolumeSnapshotStatus
-// are updated based on fields in VolumeSnapshotContentStatus. They are eventual
-// consistency. These fields are duplicate in both objects due to the following reasons:
-//   - Fields in VolumeSnapshotContentStatus can be used for filtering when importing a
-//     volumesnapshot.
-//   - VolumsnapshotStatus is used by end users because they cannot see VolumeSnapshotContent.
-//   - CSI snapshotter sidecar is light weight as it only watches VolumeSnapshotContent
-//     object, not VolumeSnapshot object.
-type VolumeSnapshotContentStatus struct {
-	// snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system.
-	// If not specified, it indicates that dynamic snapshot creation has either failed
-	// or it is still in progress.
-	// +optional
-	SnapshotHandle *string `json:"snapshotHandle,omitempty" protobuf:"bytes,1,opt,name=snapshotHandle"`
-
-	// creationTime is the timestamp when the point-in-time snapshot is taken
-	// by the underlying storage system.
-	// In dynamic snapshot creation case, this field will be filled in by the
-	// CSI snapshotter sidecar with the "creation_time" value returned from CSI
-	// "CreateSnapshot" gRPC call.
-	// For a pre-existing snapshot, this field will be filled with the "creation_time"
-	// value returned from the CSI "ListSnapshots" gRPC call if the driver supports it.
-	// If not specified, it indicates the creation time is unknown.
-	// The format of this field is a Unix nanoseconds time encoded as an int64.
-	// On Unix, the command `date +%s%N` returns the current time in nanoseconds
-	// since 1970-01-01 00:00:00 UTC.
-	// +optional
-	CreationTime *int64 `json:"creationTime,omitempty" protobuf:"varint,2,opt,name=creationTime"`
-
-	// restoreSize represents the complete size of the snapshot in bytes.
-	// In dynamic snapshot creation case, this field will be filled in by the
-	// CSI snapshotter sidecar with the "size_bytes" value returned from CSI
-	// "CreateSnapshot" gRPC call.
-	// For a pre-existing snapshot, this field will be filled with the "size_bytes"
-	// value returned from the CSI "ListSnapshots" gRPC call if the driver supports it.
-	// When restoring a volume from this snapshot, the size of the volume MUST NOT
-	// be smaller than the restoreSize if it is specified, otherwise the restoration will fail.
-	// If not specified, it indicates that the size is unknown.
-	// +kubebuilder:validation:Minimum=0
-	// +optional
-	RestoreSize *int64 `json:"restoreSize,omitempty" protobuf:"bytes,3,opt,name=restoreSize"`
-
-	// readyToUse indicates if a snapshot is ready to be used to restore a volume.
-	// In dynamic snapshot creation case, this field will be filled in by the
-	// CSI snapshotter sidecar with the "ready_to_use" value returned from CSI
-	// "CreateSnapshot" gRPC call.
-	// For a pre-existing snapshot, this field will be filled with the "ready_to_use"
-	// value returned from the CSI "ListSnapshots" gRPC call if the driver supports it,
-	// otherwise, this field will be set to "True".
-	// If not specified, it means the readiness of a snapshot is unknown.
-	// +optional.
-	ReadyToUse *bool `json:"readyToUse,omitempty" protobuf:"varint,4,opt,name=readyToUse"`
-
-	// error is the last observed error during snapshot creation, if any.
-	// Upon success after retry, this error field will be cleared.
-	// +optional
-	Error *VolumeSnapshotError `json:"error,omitempty" protobuf:"bytes,5,opt,name=error,casttype=VolumeSnapshotError"`
-
-	// VolumeGroupSnapshotHandle is the CSI "group_snapshot_id" of a group snapshot
-	// on the underlying storage system.
-	// +optional
-	VolumeGroupSnapshotHandle *string `json:"volumeGroupSnapshotHandle,omitempty" protobuf:"bytes,6,opt,name=volumeGroupSnapshotHandle"`
-}
-
-// DeletionPolicy describes a policy for end-of-life maintenance of volume snapshot contents
-// +kubebuilder:validation:Enum=Delete;Retain
-type DeletionPolicy string
-
-const (
-	// volumeSnapshotContentDelete means the snapshot will be deleted from the
-	// underlying storage system on release from its volume snapshot.
-	VolumeSnapshotContentDelete DeletionPolicy = "Delete"
-
-	// volumeSnapshotContentRetain means the snapshot will be left in its current
-	// state on release from its volume snapshot.
-	VolumeSnapshotContentRetain DeletionPolicy = "Retain"
-)
-
-// VolumeSnapshotError describes an error encountered during snapshot creation.
-type VolumeSnapshotError struct {
-	// time is the timestamp when the error was encountered.
-	// +optional
-	Time *metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"`
-
-	// message is a string detailing the encountered error during snapshot
-	// creation if specified.
-	// NOTE: message may be logged, and it should not contain sensitive
-	// information.
-	// +optional
-	Message *string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
-}
diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1/doc.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1/doc.go
similarity index 85%
rename from vendor/github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1/doc.go
rename to vendor/github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1/doc.go
index 462e9d485..f2fdf6b8b 100644
--- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1/doc.go
+++ b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1/doc.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2018 The Kubernetes Authors.
+Copyright 2023 The Kubernetes Authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -15,6 +15,6 @@ limitations under the License.
 */
 
 // +k8s:deepcopy-gen=package
-// +groupName=snapshot.storage.k8s.io
+// +groupName=groupsnapshot.storage.k8s.io
 
-package v1
+package v1beta1
diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1/register.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1/register.go
similarity index 82%
rename from vendor/github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1/register.go
rename to vendor/github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1/register.go
index cca3da0b9..22cebf945 100644
--- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1/register.go
+++ b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1/register.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2018 The Kubernetes Authors.
+Copyright 2023 The Kubernetes Authors.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
@@ -11,7 +11,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
-package v1
+package v1beta1
 
 import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
@@ -20,7 +20,7 @@ import (
 )
 
 // GroupName is the group name use in this package.
-const GroupName = "snapshot.storage.k8s.io"
+const GroupName = "groupsnapshot.storage.k8s.io"
 
 var (
 	// SchemeBuilder is the new scheme builder
@@ -28,10 +28,9 @@ var (
 	// AddToScheme adds to scheme
 	AddToScheme = SchemeBuilder.AddToScheme
 	// SchemeGroupVersion is the group version used to register these objects.
-	SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+	SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
 )
 
-// Resource takes an unqualified resource and returns a Group-qualified GroupResource.
 func Resource(resource string) schema.GroupResource {
 	return SchemeGroupVersion.WithResource(resource).GroupResource()
 }
@@ -46,12 +45,12 @@ func init() {
 // addKnownTypes adds the set of types defined in this package to the supplied scheme.
 func addKnownTypes(scheme *runtime.Scheme) error {
 	scheme.AddKnownTypes(SchemeGroupVersion,
-		&VolumeSnapshotClass{},
-		&VolumeSnapshotClassList{},
-		&VolumeSnapshot{},
-		&VolumeSnapshotList{},
-		&VolumeSnapshotContent{},
-		&VolumeSnapshotContentList{},
+		&VolumeGroupSnapshotClass{},
+		&VolumeGroupSnapshotClassList{},
+		&VolumeGroupSnapshot{},
+		&VolumeGroupSnapshotList{},
+		&VolumeGroupSnapshotContent{},
+		&VolumeGroupSnapshotContentList{},
 	)
 	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
 	return nil
diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1/types.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1/types.go
new file mode 100644
index 000000000..187c23f78
--- /dev/null
+++ b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1/types.go
@@ -0,0 +1,404 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// +kubebuilder:object:generate=true
+package v1beta1
+
+import (
+	core_v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
+)
+
+// VolumeGroupSnapshotSpec defines the desired state of a volume group snapshot.
+type VolumeGroupSnapshotSpec struct {
+	// Source specifies where a group snapshot will be created from.
+	// This field is immutable after creation.
+	// Required.
+	Source VolumeGroupSnapshotSource `json:"source" protobuf:"bytes,1,opt,name=source"`
+
+	// VolumeGroupSnapshotClassName is the name of the VolumeGroupSnapshotClass
+	// requested by the VolumeGroupSnapshot.
+	// VolumeGroupSnapshotClassName may be left nil to indicate that the default
+	// class will be used.
+	// Empty string is not allowed for this field.
+	// +optional
+	// +kubebuilder:validation:XValidation:rule="size(self) > 0",message="volumeGroupSnapshotClassName must not be the empty string when set"
+	VolumeGroupSnapshotClassName *string `json:"volumeGroupSnapshotClassName,omitempty" protobuf:"bytes,2,opt,name=volumeGroupSnapshotClassName"`
+}
+
+// VolumeGroupSnapshotSource specifies whether the underlying group snapshot should be
+// dynamically taken upon creation or if a pre-existing VolumeGroupSnapshotContent
+// object should be used.
+// Exactly one of its members must be set.
+// Members in VolumeGroupSnapshotSource are immutable.
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.selector) || has(self.selector)", message="selector is required once set"
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.volumeGroupSnapshotContentName) || has(self.volumeGroupSnapshotContentName)", message="volumeGroupSnapshotContentName is required once set"
+// +kubebuilder:validation:XValidation:rule="(has(self.selector) && !has(self.volumeGroupSnapshotContentName)) || (!has(self.selector) && has(self.volumeGroupSnapshotContentName))", message="exactly one of selector and volumeGroupSnapshotContentName must be set"
+type VolumeGroupSnapshotSource struct {
+	// Selector is a label query over persistent volume claims that are to be
+	// grouped together for snapshotting.
+	// This labelSelector will be used to match the label added to a PVC.
+	// If the label is added or removed to a volume after a group snapshot
+	// is created, the existing group snapshots won't be modified.
+	// Once a VolumeGroupSnapshotContent is created and the sidecar starts to process
+	// it, the volume list will not change with retries.
+	// +optional
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="selector is immutable"
+	Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"`
+
+	// VolumeGroupSnapshotContentName specifies the name of a pre-existing VolumeGroupSnapshotContent
+	// object representing an existing volume group snapshot.
+	// This field should be set if the volume group snapshot already exists and
+	// only needs a representation in Kubernetes.
+	// This field is immutable.
+	// +optional
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="volumeGroupSnapshotContentName is immutable"
+	VolumeGroupSnapshotContentName *string `json:"volumeGroupSnapshotContentName,omitempty" protobuf:"bytes,2,opt,name=volumeGroupSnapshotContentName"`
+}
+
+// VolumeGroupSnapshotStatus defines the observed state of volume group snapshot.
+type VolumeGroupSnapshotStatus struct {
+	// BoundVolumeGroupSnapshotContentName is the name of the VolumeGroupSnapshotContent
+	// object to which this VolumeGroupSnapshot object intends to bind to.
+	// If not specified, it indicates that the VolumeGroupSnapshot object has not
+	// been successfully bound to a VolumeGroupSnapshotContent object yet.
+	// NOTE: To avoid possible security issues, consumers must verify binding between
+	// VolumeGroupSnapshot and VolumeGroupSnapshotContent objects is successful
+	// (by validating that both VolumeGroupSnapshot and VolumeGroupSnapshotContent
+	// point at each other) before using this object.
+	// +optional
+	BoundVolumeGroupSnapshotContentName *string `json:"boundVolumeGroupSnapshotContentName,omitempty" protobuf:"bytes,1,opt,name=boundVolumeGroupSnapshotContentName"`
+
+	// CreationTime is the timestamp when the point-in-time group snapshot is taken
+	// by the underlying storage system.
+	// If not specified, it may indicate that the creation time of the group snapshot
+	// is unknown.
+	// The format of this field is a Unix nanoseconds time encoded as an int64.
+	// On Unix, the command date +%s%N returns the current time in nanoseconds
+	// since 1970-01-01 00:00:00 UTC.
+	// This field is updated based on the CreationTime field in VolumeGroupSnapshotContentStatus
+	// +optional
+	CreationTime *metav1.Time `json:"creationTime,omitempty" protobuf:"bytes,2,opt,name=creationTime"`
+
+	// ReadyToUse indicates if all the individual snapshots in the group are ready
+	// to be used to restore a group of volumes.
+	// ReadyToUse becomes true when ReadyToUse of all individual snapshots become true.
+	// If not specified, it means the readiness of a group snapshot is unknown.
+	// +optional
+	ReadyToUse *bool `json:"readyToUse,omitempty" protobuf:"varint,3,opt,name=readyToUse"`
+
+	// Error is the last observed error during group snapshot creation, if any.
+	// This field could be helpful to upper level controllers (i.e., application
+	// controller) to decide whether they should continue on waiting for the group
+	// snapshot to be created based on the type of error reported.
+	// The snapshot controller will keep retrying when an error occurs during the
+	// group snapshot creation. Upon success, this error field will be cleared.
+	// +optional
+	Error *snapshotv1.VolumeSnapshotError `json:"error,omitempty" protobuf:"bytes,4,opt,name=error,casttype=VolumeSnapshotError"`
+}
+
+//+genclient
+//+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// VolumeGroupSnapshot is a user's request for creating either a point-in-time
+// group snapshot or binding to a pre-existing group snapshot.
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:scope=Namespaced,shortName=vgs
+// +kubebuilder:subresource:status
+// +kubebuilder:deprecatedversion
+// +kubebuilder:printcolumn:name="ReadyToUse",type=boolean,JSONPath=`.status.readyToUse`,description="Indicates if all the individual snapshots in the group are ready to be used to restore a group of volumes."
+//+genclient
+//+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// VolumeGroupSnapshot is a user's request for creating either a point-in-time
+// group snapshot or binding to a pre-existing group snapshot.
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:scope=Namespaced,shortName=vgs
+// +kubebuilder:subresource:status
+// +kubebuilder:deprecatedversion
+// +kubebuilder:printcolumn:name="ReadyToUse",type=boolean,JSONPath=`.status.readyToUse`,description="Indicates if all the individual snapshots in the group are ready to be used to restore a group of volumes."
+// +kubebuilder:printcolumn:name="VolumeGroupSnapshotClass",type=string,JSONPath=`.spec.volumeGroupSnapshotClassName`,description="The name of the VolumeGroupSnapshotClass requested by the VolumeGroupSnapshot."
+// +kubebuilder:printcolumn:name="VolumeGroupSnapshotContent",type=string,JSONPath=`.status.boundVolumeGroupSnapshotContentName`,description="Name of the VolumeGroupSnapshotContent object to which the VolumeGroupSnapshot object intends to bind. Please note that verification of binding actually requires checking both VolumeGroupSnapshot and VolumeGroupSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object."
+// +kubebuilder:printcolumn:name="CreationTime",type=date,JSONPath=`.status.creationTime`,description="Timestamp when the point-in-time group snapshot was taken by the underlying storage system."
+// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`
+type VolumeGroupSnapshot struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec defines the desired characteristics of a group snapshot requested by a user.
+	// Required.
+	Spec VolumeGroupSnapshotSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+	// Status represents the current information of a group snapshot.
+	// Consumers must verify binding between VolumeGroupSnapshot and
+	// VolumeGroupSnapshotContent objects is successful (by validating that both
+	// VolumeGroupSnapshot and VolumeGroupSnapshotContent point to each other) before
+	// using this object.
+	// +optional
+	Status *VolumeGroupSnapshotStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// VolumeGroupSnapshotList contains a list of VolumeGroupSnapshot objects.
+type VolumeGroupSnapshotList struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+	// Items is the list of VolumeGroupSnapshots.
+	Items []VolumeGroupSnapshot `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
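The status comments and print columns above repeatedly insist that binding be verified in both directions before the object is trusted. A hedged sketch of that check (VolumeGroupSnapshotContent is the content type defined later in this file):

    // isBindingVerified reports whether vgs and content point at each other,
    // as the NOTE on BoundVolumeGroupSnapshotContentName requires.
    func isBindingVerified(vgs *VolumeGroupSnapshot, content *VolumeGroupSnapshotContent) bool {
        if vgs.Status == nil || vgs.Status.BoundVolumeGroupSnapshotContentName == nil {
            return false
        }
        if *vgs.Status.BoundVolumeGroupSnapshotContentName != content.Name {
            return false
        }
        ref := content.Spec.VolumeGroupSnapshotRef
        return ref.Name == vgs.Name && ref.Namespace == vgs.Namespace
    }
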
+//+genclient
+//+genclient:nonNamespaced
+//+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// VolumeGroupSnapshotClass specifies parameters that an underlying storage system
+// uses when creating a volume group snapshot. A specific VolumeGroupSnapshotClass
+// is used by specifying its name in a VolumeGroupSnapshot object.
+// VolumeGroupSnapshotClasses are non-namespaced.
+// +kubebuilder:object:root=true
+// +kubebuilder:deprecatedversion
+// +kubebuilder:resource:scope=Cluster,shortName=vgsclass;vgsclasses
+// +kubebuilder:printcolumn:name="Driver",type=string,JSONPath=`.driver`
+// +kubebuilder:printcolumn:name="DeletionPolicy",type=string,JSONPath=`.deletionPolicy`,description="Determines whether a VolumeGroupSnapshotContent created through the VolumeGroupSnapshotClass should be deleted when its bound VolumeGroupSnapshot is deleted."
+// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`
+type VolumeGroupSnapshotClass struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Driver is the name of the storage driver expected to handle this VolumeGroupSnapshotClass.
+	// Required.
+	Driver string `json:"driver" protobuf:"bytes,2,opt,name=driver"`
+
+	// Parameters is a key-value map with storage driver specific parameters for
+	// creating group snapshots.
+	// These values are opaque to Kubernetes and are passed directly to the driver.
+	// +optional
+	Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"`
+
+	// DeletionPolicy determines whether a VolumeGroupSnapshotContent created
+	// through the VolumeGroupSnapshotClass should be deleted when its bound
+	// VolumeGroupSnapshot is deleted.
+	// Supported values are "Retain" and "Delete".
+	// "Retain" means that the VolumeGroupSnapshotContent and its physical group
+	// snapshot on the underlying storage system are kept.
+	// "Delete" means that the VolumeGroupSnapshotContent and its physical group
+	// snapshot on the underlying storage system are deleted.
+	// Required.
+	DeletionPolicy snapshotv1.DeletionPolicy `json:"deletionPolicy" protobuf:"bytes,4,opt,name=deletionPolicy"`
+}
+
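For illustration, a cluster-scoped class wiring these fields together (the driver name is a placeholder; `VolumeSnapshotContentRetain` is the "Retain" constant from the volumesnapshot/v1 package imported above):

    class := VolumeGroupSnapshotClass{
        ObjectMeta:     metav1.ObjectMeta{Name: "fast-group-snaps"},
        Driver:         "example.csi.vendor.com",                // must match the driver's CSI GetPluginName()
        Parameters:     map[string]string{"fsType": "ext4"},     // opaque, passed through to the driver
        DeletionPolicy: snapshotv1.VolumeSnapshotContentRetain,  // keep the physical group snapshot on delete
    }
    _ = class
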
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// VolumeGroupSnapshotClassList is a collection of VolumeGroupSnapshotClasses.
+// +kubebuilder:object:root=true
+type VolumeGroupSnapshotClassList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of VolumeGroupSnapshotClasses.
+	Items []VolumeGroupSnapshotClass `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// VolumeGroupSnapshotContent represents the actual "on-disk" group snapshot object
+// in the underlying storage system.
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:scope=Cluster,shortName=vgsc;vgscs
+// +kubebuilder:deprecatedversion
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="ReadyToUse",type=boolean,JSONPath=`.status.readyToUse`,description="Indicates if all the individual snapshots in the group are ready to be used to restore a group of volumes."
+// +kubebuilder:printcolumn:name="DeletionPolicy",type=string,JSONPath=`.spec.deletionPolicy`,description="Determines whether this VolumeGroupSnapshotContent and its physical group snapshot on the underlying storage system should be deleted when its bound VolumeGroupSnapshot is deleted."
+// +kubebuilder:printcolumn:name="Driver",type=string,JSONPath=`.spec.driver`,description="Name of the CSI driver used to create the physical group snapshot on the underlying storage system."
+// +kubebuilder:printcolumn:name="VolumeGroupSnapshotClass",type=string,JSONPath=`.spec.volumeGroupSnapshotClassName`,description="Name of the VolumeGroupSnapshotClass from which this group snapshot was (or will be) created."
+// +kubebuilder:printcolumn:name="VolumeGroupSnapshotNamespace",type=string,JSONPath=`.spec.volumeGroupSnapshotRef.namespace`,description="Namespace of the VolumeGroupSnapshot object to which this VolumeGroupSnapshotContent object is bound."
+// +kubebuilder:printcolumn:name="VolumeGroupSnapshot",type=string,JSONPath=`.spec.volumeGroupSnapshotRef.name`,description="Name of the VolumeGroupSnapshot object to which this VolumeGroupSnapshotContent object is bound."
+// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`
+type VolumeGroupSnapshotContent struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec defines properties of a VolumeGroupSnapshotContent created by the underlying storage system.
+	// Required.
+	Spec VolumeGroupSnapshotContentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+	// Status represents the current information of a group snapshot.
+	// +optional
+	Status *VolumeGroupSnapshotContentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// VolumeGroupSnapshotContentList is a list of VolumeGroupSnapshotContent objects.
+// +kubebuilder:object:root=true
+type VolumeGroupSnapshotContentList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of VolumeGroupSnapshotContents.
+	Items []VolumeGroupSnapshotContent `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// VolumeGroupSnapshotContentSpec describes the common attributes of a group snapshot content
+type VolumeGroupSnapshotContentSpec struct {
+	// VolumeGroupSnapshotRef specifies the VolumeGroupSnapshot object to which this
+	// VolumeGroupSnapshotContent object is bound.
+	// VolumeGroupSnapshot.Spec.VolumeGroupSnapshotContentName field must refer to
+	// this VolumeGroupSnapshotContent's name for the bidirectional binding to be valid.
+	// For a pre-existing VolumeGroupSnapshotContent object, name and namespace of the
+	// VolumeGroupSnapshot object MUST be provided for binding to happen.
+	// This field is immutable after creation.
+	// Required.
+	// +kubebuilder:validation:XValidation:rule="has(self.name) && has(self.__namespace__)",message="both volumeGroupSnapshotRef.name and volumeGroupSnapshotRef.namespace must be set"
+	VolumeGroupSnapshotRef core_v1.ObjectReference `json:"volumeGroupSnapshotRef" protobuf:"bytes,1,opt,name=volumeGroupSnapshotRef"`
+
+	// DeletionPolicy determines whether this VolumeGroupSnapshotContent and the
+	// physical group snapshot on the underlying storage system should be deleted
+	// when the bound VolumeGroupSnapshot is deleted.
+	// Supported values are "Retain" and "Delete".
+	// "Retain" means that the VolumeGroupSnapshotContent and its physical group
+	// snapshot on the underlying storage system are kept.
+	// "Delete" means that the VolumeGroupSnapshotContent and its physical group
+	// snapshot on the underlying storage system are deleted.
+	// For dynamically provisioned group snapshots, this field will automatically
+	// be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field
+	// defined in the corresponding VolumeGroupSnapshotClass.
+	// For pre-existing snapshots, users MUST specify this field when creating the
+	// VolumeGroupSnapshotContent object.
+	// Required.
+	DeletionPolicy snapshotv1.DeletionPolicy `json:"deletionPolicy" protobuf:"bytes,2,opt,name=deletionPolicy"`
+
+	// Driver is the name of the CSI driver used to create the physical group snapshot on
+	// the underlying storage system.
+	// This MUST be the same as the name returned by the CSI GetPluginName() call for
+	// that driver.
+	// Required.
+	Driver string `json:"driver" protobuf:"bytes,3,opt,name=driver"`
+
+	// VolumeGroupSnapshotClassName is the name of the VolumeGroupSnapshotClass from
+	// which this group snapshot was (or will be) created.
+	// Note that after provisioning, the VolumeGroupSnapshotClass may be deleted or
+	// recreated with a different set of values, and as such, should not be referenced
+	// post-snapshot creation.
+	// For dynamic provisioning, this field must be set.
+	// This field may be unset for pre-provisioned snapshots.
+	// +optional
+	VolumeGroupSnapshotClassName *string `json:"volumeGroupSnapshotClassName,omitempty" protobuf:"bytes,4,opt,name=volumeGroupSnapshotClassName"`
+
+	// Source specifies whether the snapshot is (or should be) dynamically provisioned
+	// or already exists, and just requires a Kubernetes object representation.
+	// This field is immutable after creation.
+	// Required.
+	Source VolumeGroupSnapshotContentSource `json:"source" protobuf:"bytes,5,opt,name=source"`
+}
+
+// VolumeGroupSnapshotContentStatus defines the observed state of VolumeGroupSnapshotContent.
+type VolumeGroupSnapshotContentStatus struct {
+	// VolumeGroupSnapshotHandle is a unique id returned by the CSI driver
+	// to identify the VolumeGroupSnapshot on the storage system.
+	// If a storage system does not provide such an id, the
+	// CSI driver can choose to return the VolumeGroupSnapshot name.
+	// +optional
+	VolumeGroupSnapshotHandle *string `json:"volumeGroupSnapshotHandle,omitempty" protobuf:"bytes,1,opt,name=volumeGroupSnapshotHandle"`
+
+	// CreationTime is the timestamp when the point-in-time group snapshot is taken
+	// by the underlying storage system.
+	// If not specified, it indicates the creation time is unknown.
+	// This field is the source for the CreationTime field in VolumeGroupSnapshotStatus.
+	// +optional
+	CreationTime *metav1.Time `json:"creationTime,omitempty" protobuf:"bytes,2,opt,name=creationTime"`
+
+	// ReadyToUse indicates if all the individual snapshots in the group are ready to be
+	// used to restore a group of volumes.
+	// ReadyToUse becomes true when ReadyToUse of all individual snapshots becomes true.
+	// +optional
+	ReadyToUse *bool `json:"readyToUse,omitempty" protobuf:"varint,3,opt,name=readyToUse"`
+
+	// Error is the last observed error during group snapshot creation, if any.
+	// Upon success after retry, this error field will be cleared.
+	// +optional
+	Error *snapshotv1.VolumeSnapshotError `json:"error,omitempty" protobuf:"bytes,4,opt,name=error,casttype=VolumeSnapshotError"`
+
+	// VolumeSnapshotHandlePairList is a list of CSI "volume_id" and "snapshot_id"
+	// pairs returned by the CSI driver to identify snapshots and their source volumes
+	// on the storage system.
+	// +optional
+	VolumeSnapshotHandlePairList []VolumeSnapshotHandlePair `json:"volumeSnapshotHandlePairList,omitempty" protobuf:"bytes,6,opt,name=volumeSnapshotHandlePairList"`
+}
+
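Like VolumeGroupSnapshotSource, the content source (defined just below) is an exactly-one-of union, here between volume handles and pre-existing group snapshot handles. A sketch of the pre-provisioned path, with handle values that are placeholders rather than real CSI IDs:

    // Static provisioning: GroupSnapshotHandles set, VolumeHandles nil.
    source := VolumeGroupSnapshotContentSource{
        GroupSnapshotHandles: &GroupSnapshotHandles{
            VolumeGroupSnapshotHandle: "groupsnap-0001",                   // CSI "group_snapshot_id"
            VolumeSnapshotHandles:     []string{"snap-0001", "snap-0002"}, // CSI "snapshot_id"s
        },
    }
    _ = source
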
+// VolumeGroupSnapshotContentSource represents the CSI source of a group snapshot.
+// Exactly one of its members must be set.
+// Members in VolumeGroupSnapshotContentSource are immutable.
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.volumeHandles) || has(self.volumeHandles)", message="volumeHandles is required once set"
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.groupSnapshotHandles) || has(self.groupSnapshotHandles)", message="groupSnapshotHandles is required once set"
+// +kubebuilder:validation:XValidation:rule="(has(self.volumeHandles) && !has(self.groupSnapshotHandles)) || (!has(self.volumeHandles) && has(self.groupSnapshotHandles))", message="exactly one of volumeHandles and groupSnapshotHandles must be set"
+type VolumeGroupSnapshotContentSource struct {
+	// VolumeHandles is a list of volume handles on the backend to be snapshotted
+	// together. It is specified for dynamic provisioning of the VolumeGroupSnapshot.
+	// This field is immutable.
+	// +optional
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="volumeHandles is immutable"
+	VolumeHandles []string `json:"volumeHandles,omitempty" protobuf:"bytes,1,opt,name=volumeHandles"`
+
+	// GroupSnapshotHandles specifies the CSI "group_snapshot_id" of a pre-existing
+	// group snapshot and a list of CSI "snapshot_id" of pre-existing snapshots
+	// on the underlying storage system for which a Kubernetes object
+	// representation was (or should be) created.
+	// This field is immutable.
+	// +optional
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="groupSnapshotHandles is immutable"
+	GroupSnapshotHandles *GroupSnapshotHandles `json:"groupSnapshotHandles,omitempty" protobuf:"bytes,2,opt,name=groupSnapshotHandles"`
+}
+
+type GroupSnapshotHandles struct {
+	// VolumeGroupSnapshotHandle specifies the CSI "group_snapshot_id" of a pre-existing
+	// group snapshot on the underlying storage system for which a Kubernetes object
+	// representation was (or should be) created.
+	// This field is immutable.
+	// Required.
+	VolumeGroupSnapshotHandle string `json:"volumeGroupSnapshotHandle" protobuf:"bytes,1,opt,name=volumeGroupSnapshotHandle"`
+
+	// VolumeSnapshotHandles is a list of CSI "snapshot_id" of pre-existing
+	// snapshots on the underlying storage system for which Kubernetes object
+	// representations were (or should be) created.
+	// This field is immutable.
+	// Required.
+	VolumeSnapshotHandles []string `json:"volumeSnapshotHandles" protobuf:"bytes,2,opt,name=volumeSnapshotHandles"`
+}
+
+// VolumeSnapshotHandlePair defines a pair of a source volume handle and a snapshot handle.
+type VolumeSnapshotHandlePair struct {
+	// VolumeHandle is a unique id returned by the CSI driver to identify a volume
+	// on the storage system.
+	// Required.
+	VolumeHandle string `json:"volumeHandle" protobuf:"bytes,1,opt,name=volumeHandle"`
+
+	// SnapshotHandle is a unique id returned by the CSI driver to identify a volume
+	// snapshot on the storage system.
+	// Required.
+ SnapshotHandle string `json:"snapshotHandle" protobuf:"bytes,2,opt,name=snapshotHandle"` +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1/zz_generated.deepcopy.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1/zz_generated.deepcopy.go similarity index 55% rename from vendor/github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1/zz_generated.deepcopy.go rename to vendor/github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1/zz_generated.deepcopy.go index a590aef06..4c59130df 100644 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1/zz_generated.deepcopy.go @@ -19,39 +19,61 @@ limitations under the License. // Code generated by deepcopy-gen. DO NOT EDIT. -package v1 +package v1beta1 import ( - corev1 "k8s.io/api/core/v1" + v1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeSnapshot) DeepCopyInto(out *VolumeSnapshot) { +func (in *GroupSnapshotHandles) DeepCopyInto(out *GroupSnapshotHandles) { + *out = *in + if in.VolumeSnapshotHandles != nil { + in, out := &in.VolumeSnapshotHandles, &out.VolumeSnapshotHandles + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupSnapshotHandles. +func (in *GroupSnapshotHandles) DeepCopy() *GroupSnapshotHandles { + if in == nil { + return nil + } + out := new(GroupSnapshotHandles) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeGroupSnapshot) DeepCopyInto(out *VolumeGroupSnapshot) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) if in.Status != nil { in, out := &in.Status, &out.Status - *out = new(VolumeSnapshotStatus) + *out = new(VolumeGroupSnapshotStatus) (*in).DeepCopyInto(*out) } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshot. -func (in *VolumeSnapshot) DeepCopy() *VolumeSnapshot { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeGroupSnapshot. +func (in *VolumeGroupSnapshot) DeepCopy() *VolumeGroupSnapshot { if in == nil { return nil } - out := new(VolumeSnapshot) + out := new(VolumeGroupSnapshot) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VolumeSnapshot) DeepCopyObject() runtime.Object { +func (in *VolumeGroupSnapshot) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -59,7 +81,7 @@ func (in *VolumeSnapshot) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *VolumeSnapshotClass) DeepCopyInto(out *VolumeSnapshotClass) { +func (in *VolumeGroupSnapshotClass) DeepCopyInto(out *VolumeGroupSnapshotClass) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) @@ -73,18 +95,18 @@ func (in *VolumeSnapshotClass) DeepCopyInto(out *VolumeSnapshotClass) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotClass. -func (in *VolumeSnapshotClass) DeepCopy() *VolumeSnapshotClass { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeGroupSnapshotClass. +func (in *VolumeGroupSnapshotClass) DeepCopy() *VolumeGroupSnapshotClass { if in == nil { return nil } - out := new(VolumeSnapshotClass) + out := new(VolumeGroupSnapshotClass) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VolumeSnapshotClass) DeepCopyObject() runtime.Object { +func (in *VolumeGroupSnapshotClass) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -92,13 +114,13 @@ func (in *VolumeSnapshotClass) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeSnapshotClassList) DeepCopyInto(out *VolumeSnapshotClassList) { +func (in *VolumeGroupSnapshotClassList) DeepCopyInto(out *VolumeGroupSnapshotClassList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]VolumeSnapshotClass, len(*in)) + *out = make([]VolumeGroupSnapshotClass, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -106,18 +128,18 @@ func (in *VolumeSnapshotClassList) DeepCopyInto(out *VolumeSnapshotClassList) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotClassList. -func (in *VolumeSnapshotClassList) DeepCopy() *VolumeSnapshotClassList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeGroupSnapshotClassList. +func (in *VolumeGroupSnapshotClassList) DeepCopy() *VolumeGroupSnapshotClassList { if in == nil { return nil } - out := new(VolumeSnapshotClassList) + out := new(VolumeGroupSnapshotClassList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VolumeSnapshotClassList) DeepCopyObject() runtime.Object { +func (in *VolumeGroupSnapshotClassList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -125,31 +147,31 @@ func (in *VolumeSnapshotClassList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeSnapshotContent) DeepCopyInto(out *VolumeSnapshotContent) { +func (in *VolumeGroupSnapshotContent) DeepCopyInto(out *VolumeGroupSnapshotContent) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) if in.Status != nil { in, out := &in.Status, &out.Status - *out = new(VolumeSnapshotContentStatus) + *out = new(VolumeGroupSnapshotContentStatus) (*in).DeepCopyInto(*out) } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotContent. 
-func (in *VolumeSnapshotContent) DeepCopy() *VolumeSnapshotContent { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeGroupSnapshotContent. +func (in *VolumeGroupSnapshotContent) DeepCopy() *VolumeGroupSnapshotContent { if in == nil { return nil } - out := new(VolumeSnapshotContent) + out := new(VolumeGroupSnapshotContent) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VolumeSnapshotContent) DeepCopyObject() runtime.Object { +func (in *VolumeGroupSnapshotContent) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -157,13 +179,13 @@ func (in *VolumeSnapshotContent) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeSnapshotContentList) DeepCopyInto(out *VolumeSnapshotContentList) { +func (in *VolumeGroupSnapshotContentList) DeepCopyInto(out *VolumeGroupSnapshotContentList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]VolumeSnapshotContent, len(*in)) + *out = make([]VolumeGroupSnapshotContent, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -171,18 +193,18 @@ func (in *VolumeSnapshotContentList) DeepCopyInto(out *VolumeSnapshotContentList return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotContentList. -func (in *VolumeSnapshotContentList) DeepCopy() *VolumeSnapshotContentList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeGroupSnapshotContentList. +func (in *VolumeGroupSnapshotContentList) DeepCopy() *VolumeGroupSnapshotContentList { if in == nil { return nil } - out := new(VolumeSnapshotContentList) + out := new(VolumeGroupSnapshotContentList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VolumeSnapshotContentList) DeepCopyObject() runtime.Object { +func (in *VolumeGroupSnapshotContentList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -190,76 +212,65 @@ func (in *VolumeSnapshotContentList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeSnapshotContentSource) DeepCopyInto(out *VolumeSnapshotContentSource) { +func (in *VolumeGroupSnapshotContentSource) DeepCopyInto(out *VolumeGroupSnapshotContentSource) { *out = *in - if in.VolumeHandle != nil { - in, out := &in.VolumeHandle, &out.VolumeHandle - *out = new(string) - **out = **in - } - if in.SnapshotHandle != nil { - in, out := &in.SnapshotHandle, &out.SnapshotHandle - *out = new(string) - **out = **in + if in.VolumeHandles != nil { + in, out := &in.VolumeHandles, &out.VolumeHandles + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.GroupSnapshotHandles != nil { + in, out := &in.GroupSnapshotHandles, &out.GroupSnapshotHandles + *out = new(GroupSnapshotHandles) + (*in).DeepCopyInto(*out) } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotContentSource. 
-func (in *VolumeSnapshotContentSource) DeepCopy() *VolumeSnapshotContentSource { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeGroupSnapshotContentSource. +func (in *VolumeGroupSnapshotContentSource) DeepCopy() *VolumeGroupSnapshotContentSource { if in == nil { return nil } - out := new(VolumeSnapshotContentSource) + out := new(VolumeGroupSnapshotContentSource) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeSnapshotContentSpec) DeepCopyInto(out *VolumeSnapshotContentSpec) { +func (in *VolumeGroupSnapshotContentSpec) DeepCopyInto(out *VolumeGroupSnapshotContentSpec) { *out = *in - out.VolumeSnapshotRef = in.VolumeSnapshotRef - if in.VolumeSnapshotClassName != nil { - in, out := &in.VolumeSnapshotClassName, &out.VolumeSnapshotClassName + out.VolumeGroupSnapshotRef = in.VolumeGroupSnapshotRef + if in.VolumeGroupSnapshotClassName != nil { + in, out := &in.VolumeGroupSnapshotClassName, &out.VolumeGroupSnapshotClassName *out = new(string) **out = **in } in.Source.DeepCopyInto(&out.Source) - if in.SourceVolumeMode != nil { - in, out := &in.SourceVolumeMode, &out.SourceVolumeMode - *out = new(corev1.PersistentVolumeMode) - **out = **in - } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotContentSpec. -func (in *VolumeSnapshotContentSpec) DeepCopy() *VolumeSnapshotContentSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeGroupSnapshotContentSpec. +func (in *VolumeGroupSnapshotContentSpec) DeepCopy() *VolumeGroupSnapshotContentSpec { if in == nil { return nil } - out := new(VolumeSnapshotContentSpec) + out := new(VolumeGroupSnapshotContentSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeSnapshotContentStatus) DeepCopyInto(out *VolumeSnapshotContentStatus) { +func (in *VolumeGroupSnapshotContentStatus) DeepCopyInto(out *VolumeGroupSnapshotContentStatus) { *out = *in - if in.SnapshotHandle != nil { - in, out := &in.SnapshotHandle, &out.SnapshotHandle + if in.VolumeGroupSnapshotHandle != nil { + in, out := &in.VolumeGroupSnapshotHandle, &out.VolumeGroupSnapshotHandle *out = new(string) **out = **in } if in.CreationTime != nil { in, out := &in.CreationTime, &out.CreationTime - *out = new(int64) - **out = **in - } - if in.RestoreSize != nil { - in, out := &in.RestoreSize, &out.RestoreSize - *out = new(int64) - **out = **in + *out = (*in).DeepCopy() } if in.ReadyToUse != nil { in, out := &in.ReadyToUse, &out.ReadyToUse @@ -268,60 +279,35 @@ func (in *VolumeSnapshotContentStatus) DeepCopyInto(out *VolumeSnapshotContentSt } if in.Error != nil { in, out := &in.Error, &out.Error - *out = new(VolumeSnapshotError) + *out = new(v1.VolumeSnapshotError) (*in).DeepCopyInto(*out) } - if in.VolumeGroupSnapshotHandle != nil { - in, out := &in.VolumeGroupSnapshotHandle, &out.VolumeGroupSnapshotHandle - *out = new(string) - **out = **in + if in.VolumeSnapshotHandlePairList != nil { + in, out := &in.VolumeSnapshotHandlePairList, &out.VolumeSnapshotHandlePairList + *out = make([]VolumeSnapshotHandlePair, len(*in)) + copy(*out, *in) } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotContentStatus. 
-func (in *VolumeSnapshotContentStatus) DeepCopy() *VolumeSnapshotContentStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeGroupSnapshotContentStatus. +func (in *VolumeGroupSnapshotContentStatus) DeepCopy() *VolumeGroupSnapshotContentStatus { if in == nil { return nil } - out := new(VolumeSnapshotContentStatus) + out := new(VolumeGroupSnapshotContentStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeSnapshotError) DeepCopyInto(out *VolumeSnapshotError) { - *out = *in - if in.Time != nil { - in, out := &in.Time, &out.Time - *out = (*in).DeepCopy() - } - if in.Message != nil { - in, out := &in.Message, &out.Message - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotError. -func (in *VolumeSnapshotError) DeepCopy() *VolumeSnapshotError { - if in == nil { - return nil - } - out := new(VolumeSnapshotError) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeSnapshotList) DeepCopyInto(out *VolumeSnapshotList) { +func (in *VolumeGroupSnapshotList) DeepCopyInto(out *VolumeGroupSnapshotList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]VolumeSnapshot, len(*in)) + *out = make([]VolumeGroupSnapshot, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -329,18 +315,18 @@ func (in *VolumeSnapshotList) DeepCopyInto(out *VolumeSnapshotList) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotList. -func (in *VolumeSnapshotList) DeepCopy() *VolumeSnapshotList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeGroupSnapshotList. +func (in *VolumeGroupSnapshotList) DeepCopy() *VolumeGroupSnapshotList { if in == nil { return nil } - out := new(VolumeSnapshotList) + out := new(VolumeGroupSnapshotList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VolumeSnapshotList) DeepCopyObject() runtime.Object { +func (in *VolumeGroupSnapshotList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -348,58 +334,58 @@ func (in *VolumeSnapshotList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *VolumeSnapshotSource) DeepCopyInto(out *VolumeSnapshotSource) { +func (in *VolumeGroupSnapshotSource) DeepCopyInto(out *VolumeGroupSnapshotSource) { *out = *in - if in.PersistentVolumeClaimName != nil { - in, out := &in.PersistentVolumeClaimName, &out.PersistentVolumeClaimName - *out = new(string) - **out = **in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) } - if in.VolumeSnapshotContentName != nil { - in, out := &in.VolumeSnapshotContentName, &out.VolumeSnapshotContentName + if in.VolumeGroupSnapshotContentName != nil { + in, out := &in.VolumeGroupSnapshotContentName, &out.VolumeGroupSnapshotContentName *out = new(string) **out = **in } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotSource. -func (in *VolumeSnapshotSource) DeepCopy() *VolumeSnapshotSource { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeGroupSnapshotSource. +func (in *VolumeGroupSnapshotSource) DeepCopy() *VolumeGroupSnapshotSource { if in == nil { return nil } - out := new(VolumeSnapshotSource) + out := new(VolumeGroupSnapshotSource) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeSnapshotSpec) DeepCopyInto(out *VolumeSnapshotSpec) { +func (in *VolumeGroupSnapshotSpec) DeepCopyInto(out *VolumeGroupSnapshotSpec) { *out = *in in.Source.DeepCopyInto(&out.Source) - if in.VolumeSnapshotClassName != nil { - in, out := &in.VolumeSnapshotClassName, &out.VolumeSnapshotClassName + if in.VolumeGroupSnapshotClassName != nil { + in, out := &in.VolumeGroupSnapshotClassName, &out.VolumeGroupSnapshotClassName *out = new(string) **out = **in } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotSpec. -func (in *VolumeSnapshotSpec) DeepCopy() *VolumeSnapshotSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeGroupSnapshotSpec. +func (in *VolumeGroupSnapshotSpec) DeepCopy() *VolumeGroupSnapshotSpec { if in == nil { return nil } - out := new(VolumeSnapshotSpec) + out := new(VolumeGroupSnapshotSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *VolumeSnapshotStatus) DeepCopyInto(out *VolumeSnapshotStatus) { +func (in *VolumeGroupSnapshotStatus) DeepCopyInto(out *VolumeGroupSnapshotStatus) { *out = *in - if in.BoundVolumeSnapshotContentName != nil { - in, out := &in.BoundVolumeSnapshotContentName, &out.BoundVolumeSnapshotContentName + if in.BoundVolumeGroupSnapshotContentName != nil { + in, out := &in.BoundVolumeGroupSnapshotContentName, &out.BoundVolumeGroupSnapshotContentName *out = new(string) **out = **in } @@ -412,30 +398,36 @@ func (in *VolumeSnapshotStatus) DeepCopyInto(out *VolumeSnapshotStatus) { *out = new(bool) **out = **in } - if in.RestoreSize != nil { - in, out := &in.RestoreSize, &out.RestoreSize - x := (*in).DeepCopy() - *out = &x - } if in.Error != nil { in, out := &in.Error, &out.Error - *out = new(VolumeSnapshotError) + *out = new(v1.VolumeSnapshotError) (*in).DeepCopyInto(*out) } - if in.VolumeGroupSnapshotName != nil { - in, out := &in.VolumeGroupSnapshotName, &out.VolumeGroupSnapshotName - *out = new(string) - **out = **in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeGroupSnapshotStatus. +func (in *VolumeGroupSnapshotStatus) DeepCopy() *VolumeGroupSnapshotStatus { + if in == nil { + return nil } + out := new(VolumeGroupSnapshotStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeSnapshotHandlePair) DeepCopyInto(out *VolumeSnapshotHandlePair) { + *out = *in return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotStatus. -func (in *VolumeSnapshotStatus) DeepCopy() *VolumeSnapshotStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotHandlePair. +func (in *VolumeSnapshotHandlePair) DeepCopy() *VolumeSnapshotHandlePair { if in == nil { return nil } - out := new(VolumeSnapshotStatus) + out := new(VolumeSnapshotHandlePair) in.DeepCopyInto(out) return out } diff --git a/vendor/github.com/vmware-tanzu/velero/internal/credentials/file_store.go b/vendor/github.com/vmware-tanzu/velero/internal/credentials/file_store.go new file mode 100644 index 000000000..d1f1fb10a --- /dev/null +++ b/vendor/github.com/vmware-tanzu/velero/internal/credentials/file_store.go @@ -0,0 +1,89 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package credentials + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/pkg/errors" + corev1api "k8s.io/api/core/v1" + kbclient "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/vmware-tanzu/velero/pkg/util/filesystem" + "github.com/vmware-tanzu/velero/pkg/util/kube" +) + +// FileStore defines operations for interacting with credentials +// that are stored on a file system. +type FileStore interface { + // Path returns a path on disk where the secret key defined by + // the given selector is serialized. 
+ Path(selector *corev1api.SecretKeySelector) (string, error) +} + +type namespacedFileStore struct { + client kbclient.Client + namespace string + fsRoot string + fs filesystem.Interface +} + +// NewNamespacedFileStore returns a FileStore which can interact with credentials +// for the given namespace and will store them under the given fsRoot. +func NewNamespacedFileStore(client kbclient.Client, namespace string, fsRoot string, fs filesystem.Interface) (FileStore, error) { + fsNamespaceRoot := filepath.Join(fsRoot, namespace) + + if err := fs.MkdirAll(fsNamespaceRoot, 0755); err != nil { + return nil, err + } + + return &namespacedFileStore{ + client: client, + namespace: namespace, + fsRoot: fsNamespaceRoot, + fs: fs, + }, nil +} + +// Path returns a path on disk where the secret key defined by +// the given selector is serialized. +func (n *namespacedFileStore) Path(selector *corev1api.SecretKeySelector) (string, error) { + creds, err := kube.GetSecretKey(n.client, n.namespace, selector) + if err != nil { + return "", errors.Wrap(err, "unable to get key for secret") + } + + keyFilePath := filepath.Join(n.fsRoot, fmt.Sprintf("%s-%s", selector.Name, selector.Key)) + + // owner RW perms, group R perms, no public perms + file, err := n.fs.OpenFile(keyFilePath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0640) + if err != nil { + return "", errors.Wrap(err, "unable to open credentials file for writing") + } + + if _, err := file.Write(creds); err != nil { + return "", errors.Wrap(err, "unable to write credentials to store") + } + + if err := file.Close(); err != nil { + return "", errors.Wrap(err, "unable to close credentials file") + } + + return keyFilePath, nil +} diff --git a/vendor/github.com/vmware-tanzu/velero/internal/credentials/getter.go b/vendor/github.com/vmware-tanzu/velero/internal/credentials/getter.go new file mode 100644 index 000000000..c890c0566 --- /dev/null +++ b/vendor/github.com/vmware-tanzu/velero/internal/credentials/getter.go @@ -0,0 +1,24 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package credentials
+
+// CredentialGetter is a collection of interfaces for interacting with credentials
+// that are stored in different targets.
+type CredentialGetter struct {
+	FromFile   FileStore
+	FromSecret SecretStore
+}
diff --git a/vendor/github.com/vmware-tanzu/velero/internal/credentials/local.go b/vendor/github.com/vmware-tanzu/velero/internal/credentials/local.go
new file mode 100644
index 000000000..00ccc3174
--- /dev/null
+++ b/vendor/github.com/vmware-tanzu/velero/internal/credentials/local.go
@@ -0,0 +1,7 @@
+package credentials
+
+import "os"
+
+func DefaultStoreDirectory() string {
+	return os.TempDir() + "/credentials"
+}
diff --git a/vendor/github.com/vmware-tanzu/velero/internal/credentials/secret_store.go b/vendor/github.com/vmware-tanzu/velero/internal/credentials/secret_store.go
new file mode 100644
index 000000000..f4d2111a5
--- /dev/null
+++ b/vendor/github.com/vmware-tanzu/velero/internal/credentials/secret_store.go
@@ -0,0 +1,56 @@
+/*
+Copyright the Velero contributors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package credentials
+
+import (
+	"github.com/pkg/errors"
+	corev1api "k8s.io/api/core/v1"
+	kbclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+	"github.com/vmware-tanzu/velero/pkg/util/kube"
+)
+
+// SecretStore defines operations for interacting with credentials
+// that are stored in a Secret.
+type SecretStore interface {
+	// Get returns the secret key defined by the given selector
+	Get(selector *corev1api.SecretKeySelector) (string, error)
+}
+
+type namespacedSecretStore struct {
+	client    kbclient.Client
+	namespace string
+}
+
+// NewNamespacedSecretStore returns a SecretStore which can interact with credentials
+// for the given namespace.
+func NewNamespacedSecretStore(client kbclient.Client, namespace string) (SecretStore, error) {
+	return &namespacedSecretStore{
+		client:    client,
+		namespace: namespace,
+	}, nil
+}
+
+// Get returns the secret key defined by the given selector.
+func (n *namespacedSecretStore) Get(selector *corev1api.SecretKeySelector) (string, error) {
+	creds, err := kube.GetSecretKey(n.client, n.namespace, selector)
+	if err != nil {
+		return "", errors.Wrap(err, "unable to get key for secret")
+	}
+
+	return string(creds), nil
+}
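Taken together, the credential files above assemble into a CredentialGetter. A hedged usage sketch (the controller-runtime client and `filesystem.NewFileSystem` from velero's util package are assumed here, not shown in this diff):

    func buildCredentialGetter(client kbclient.Client, ns string) (credentials.CredentialGetter, error) {
        fileStore, err := credentials.NewNamespacedFileStore(client, ns, credentials.DefaultStoreDirectory(), filesystem.NewFileSystem())
        if err != nil {
            return credentials.CredentialGetter{}, err
        }
        secretStore, err := credentials.NewNamespacedSecretStore(client, ns)
        if err != nil {
            return credentials.CredentialGetter{}, err
        }
        // FromFile serializes a secret key to an on-disk path (0640) for tools
        // that expect a file; FromSecret returns the raw value for in-process use.
        return credentials.CredentialGetter{FromFile: fileStore, FromSecret: secretStore}, nil
    }
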
diff --git a/vendor/github.com/vmware-tanzu/velero/internal/resourcepolicies/resource_policies.go b/vendor/github.com/vmware-tanzu/velero/internal/resourcepolicies/resource_policies.go
new file mode 100644
index 000000000..d484cabce
--- /dev/null
+++ b/vendor/github.com/vmware-tanzu/velero/internal/resourcepolicies/resource_policies.go
@@ -0,0 +1,296 @@
+/*
+Copyright The Velero Contributors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resourcepolicies
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/util/sets"
+
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	corev1api "k8s.io/api/core/v1"
+	crclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
+)
+
+type VolumeActionType string
+
+const (
+	// currently only the configmap type of resource config is supported
+	ConfigmapRefType string = "configmap"
+	// skip action implies the volume would be excluded from the backup operation
+	Skip VolumeActionType = "skip"
+	// fs-backup action implies that the volume would be backed up via the file system copy method using the uploader (kopia/restic) configured by the user
+	FSBackup VolumeActionType = "fs-backup"
+	// snapshot action can have 3 different meanings based on velero configuration and backup spec - cloud provider based snapshots, local CSI snapshots and datamover snapshots
+	Snapshot VolumeActionType = "snapshot"
+)
+
+// Action defines one action for a specific way of backup
+type Action struct {
+	// Type defines the specific type of action; currently 'skip', 'fs-backup' and 'snapshot' are supported
+	Type VolumeActionType `yaml:"type"`
+	// Parameters defines a map of parameters used when executing a specific action
+	Parameters map[string]any `yaml:"parameters,omitempty"`
+}
+
+// IncludeExcludePolicy defines a policy to include or exclude resources based on their names
+type IncludeExcludePolicy struct {
+	// The following fields have the same semantics as those from the spec of backup.
+	// Refer to the comment in the velerov1api.BackupSpec for more details.
+	IncludedClusterScopedResources   []string `yaml:"includedClusterScopedResources"`
+	ExcludedClusterScopedResources   []string `yaml:"excludedClusterScopedResources"`
+	IncludedNamespaceScopedResources []string `yaml:"includedNamespaceScopedResources"`
+	ExcludedNamespaceScopedResources []string `yaml:"excludedNamespaceScopedResources"`
+}
+
+func (p *IncludeExcludePolicy) Validate() error {
+	if err := p.validateIncludeExclude(p.IncludedClusterScopedResources, p.ExcludedClusterScopedResources); err != nil {
+		return err
+	}
+	return p.validateIncludeExclude(p.IncludedNamespaceScopedResources, p.ExcludedNamespaceScopedResources)
+}
+
+func (p *IncludeExcludePolicy) validateIncludeExclude(includesList, excludesList []string) error {
+	includes := sets.NewString(includesList...)
+	excludes := sets.NewString(excludesList...)
+
+	if includes.Has("*") || excludes.Has("*") {
+		return fmt.Errorf("cannot use '*' in includes or excludes filters in the policy")
+	}
+	for _, itm := range excludes.List() {
+		if includes.Has(itm) {
+			return fmt.Errorf("excludes list cannot contain an item in the includes list: %s", itm)
+		}
+	}
+	return nil
+}
+
+// VolumePolicy defines the conditions used to match Volumes and the related action to handle matched Volumes
+type VolumePolicy struct {
+	// Conditions defines the list of conditions to match Volumes
+	Conditions map[string]any `yaml:"conditions"`
+	Action     Action         `yaml:"action"`
+}
+
+// ResourcePolicies currently defines a slice of volume policies to handle backup
+type ResourcePolicies struct {
+	Version              string                `yaml:"version"`
+	VolumePolicies       []VolumePolicy        `yaml:"volumePolicies"`
+	IncludeExcludePolicy *IncludeExcludePolicy `yaml:"includeExcludePolicy"`
+	// we may support other resource policies in the future, and they could be added separately
+	// OtherResourcePolicies []OtherResourcePolicy
+}
+
+type Policies struct {
+	version              string
+	volumePolicies       []volPolicy
+	includeExcludePolicy *IncludeExcludePolicy
+	// OtherPolicies
+}
+
+func unmarshalResourcePolicies(yamlData *string) (*ResourcePolicies, error) {
+	resPolicies := &ResourcePolicies{}
+	err := decodeStruct(strings.NewReader(*yamlData), resPolicies)
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode yaml data into resource policies: %v", err)
+	}
+
+	for _, vp := range resPolicies.VolumePolicies {
+		if raw, ok := vp.Conditions["pvcLabels"]; ok {
+			switch raw.(type) {
+			case map[string]any, map[string]string:
+			default:
+				return nil, fmt.Errorf("pvcLabels must be a map of string to string, got %T", raw)
+			}
+		}
+	}
+	return resPolicies, nil
+}
+
+func (p *Policies) BuildPolicy(resPolicies *ResourcePolicies) error {
+	for _, vp := range resPolicies.VolumePolicies {
+		con, err := unmarshalVolConditions(vp.Conditions)
+		if err != nil {
+			return errors.WithStack(err)
+		}
+		volCap, err := parseCapacity(con.Capacity)
+		if err != nil {
+			return errors.WithStack(err)
+		}
+		var volP volPolicy
+		volP.action = vp.Action
+		volP.conditions = append(volP.conditions, &capacityCondition{capacity: *volCap})
+		volP.conditions = append(volP.conditions, &storageClassCondition{storageClass: con.StorageClass})
+		volP.conditions = append(volP.conditions, &nfsCondition{nfs: con.NFS})
+		volP.conditions = append(volP.conditions, &csiCondition{csi: con.CSI})
+		volP.conditions = append(volP.conditions, &volumeTypeCondition{volumeTypes: con.VolumeTypes})
+		if len(con.PVCLabels) > 0 {
+			volP.conditions = append(volP.conditions, &pvcLabelsCondition{labels: con.PVCLabels})
+		}
+		if len(con.PVCPhase) > 0 {
+			volP.conditions = append(volP.conditions, &pvcPhaseCondition{phases: con.PVCPhase})
+		}
+		p.volumePolicies = append(p.volumePolicies, volP)
+	}
+
+	// Other resource policies
+
+	p.version = resPolicies.Version
+	p.includeExcludePolicy = resPolicies.IncludeExcludePolicy
+	return nil
+}
+
+func (p *Policies) match(res *structuredVolume) *Action {
+	for _, policy := range p.volumePolicies {
+		isAllMatch := false
+		for _, con := range policy.conditions {
+			if !con.match(res) {
+				isAllMatch = false
+				break
+			}
+			isAllMatch = true
+		}
+		if isAllMatch {
+			return &policy.action
+		}
+	}
+	return nil
+}
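match applies first-match-wins semantics: policies are evaluated in order and the first one whose conditions all hold supplies the action. A hedged example of the YAML these types decode, assuming the package's supported version string is "v1":

    const samplePolicyYAML = `
    version: v1
    volumePolicies:
      # evaluated in order; the first policy whose conditions all match wins
      - conditions:
          capacity: "0,100Gi"
          storageClass:
            - gp2
          pvcLabels:
            environment: production
        action:
          type: skip
    `
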
+
+func (p *Policies) GetMatchAction(res any) (*Action, error) {
+	data, ok := res.(VolumeFilterData)
+	if !ok {
+		return nil, errors.New("failed to convert input to VolumeFilterData")
+	}
+
+	volume := &structuredVolume{}
+	switch {
+	case data.PersistentVolume != nil:
+		volume.parsePV(data.PersistentVolume)
+		if data.PVC != nil {
+			volume.parsePVC(data.PVC)
+		}
+	case data.PodVolume != nil:
+		volume.parsePodVolume(data.PodVolume)
+		if data.PVC != nil {
+			volume.parsePVC(data.PVC)
+		}
+	case data.PVC != nil:
+		// Handle PVC-only scenarios (e.g., unbound PVCs)
+		volume.parsePVC(data.PVC)
+	default:
+		return nil, errors.New("failed to convert object")
+	}
+
+	return p.match(volume), nil
+}
+
+func (p *Policies) Validate() error {
+	if p.version != currentSupportDataVersion {
+		return fmt.Errorf("incompatible version number %s with supported version %s", p.version, currentSupportDataVersion)
+	}
+
+	for _, policy := range p.volumePolicies {
+		if err := policy.action.validate(); err != nil {
+			return errors.WithStack(err)
+		}
+		for _, con := range policy.conditions {
+			if err := con.validate(); err != nil {
+				return errors.WithStack(err)
+			}
+		}
+	}
+
+	if p.GetIncludeExcludePolicy() != nil {
+		if err := p.GetIncludeExcludePolicy().Validate(); err != nil {
+			return errors.WithStack(err)
+		}
+	}
+
+	return nil
+}
+
+func (p *Policies) GetIncludeExcludePolicy() *IncludeExcludePolicy {
+	return p.includeExcludePolicy
+}
+
+func GetResourcePoliciesFromBackup(
+	backup velerov1api.Backup,
+	client crclient.Client,
+	logger logrus.FieldLogger,
+) (resourcePolicies *Policies, err error) {
+	if backup.Spec.ResourcePolicy != nil &&
+		strings.EqualFold(backup.Spec.ResourcePolicy.Kind, ConfigmapRefType) {
+		policiesConfigMap := &corev1api.ConfigMap{}
+		err = client.Get(
+			context.Background(),
+			crclient.ObjectKey{Namespace: backup.Namespace, Name: backup.Spec.ResourcePolicy.Name},
+			policiesConfigMap,
+		)
+		if err != nil {
+			logger.Errorf("Failed to get ResourcePolicies %s ConfigMap with error %s.",
+				backup.Namespace+"/"+backup.Spec.ResourcePolicy.Name, err.Error())
+			return nil, fmt.Errorf("failed to get ResourcePolicies %s ConfigMap with error %s",
+				backup.Namespace+"/"+backup.Spec.ResourcePolicy.Name, err.Error())
+		}
+		resourcePolicies, err = getResourcePoliciesFromConfig(policiesConfigMap)
+		if err != nil {
+			logger.Errorf("Failed to read ResourcePolicies from ConfigMap %s with error %s.",
+				backup.Namespace+"/"+backup.Name, err.Error())
+			return nil, fmt.Errorf("failed to read the ResourcePolicies from ConfigMap %s with error %s",
+				backup.Namespace+"/"+backup.Name, err.Error())
+		} else if err = resourcePolicies.Validate(); err != nil {
+			logger.Errorf("Failed to validate ResourcePolicies in ConfigMap %s with error %s.",
+				backup.Namespace+"/"+backup.Name, err.Error())
+			return nil, fmt.Errorf("failed to validate ResourcePolicies in ConfigMap %s with error %s",
+				backup.Namespace+"/"+backup.Name, err.Error())
+		}
+	}
+
+	return resourcePolicies, nil
+}
+
+func getResourcePoliciesFromConfig(cm *corev1api.ConfigMap) (*Policies, error) {
+	if cm == nil {
+		return nil, fmt.Errorf("could not parse config from nil configmap")
+	}
+	if len(cm.Data) != 1 {
+		return nil, fmt.Errorf("illegal resource policies %s/%s configmap", cm.Namespace, cm.Name)
+	}
+
+	var yamlData string
+	for _, v := range cm.Data {
+		yamlData = v
+	}
+
+	resPolicies, err := unmarshalResourcePolicies(&yamlData)
+	if err != nil {
+		return nil, errors.WithStack(err)
+	}
+
+	policies := &Policies{}
+	if err := policies.BuildPolicy(resPolicies); err != nil {
+		return nil, errors.WithStack(err)
+	}
+
+	return policies, nil
+}
diff --git a/vendor/github.com/vmware-tanzu/velero/internal/resourcepolicies/volume_filter_data.go 
b/vendor/github.com/vmware-tanzu/velero/internal/resourcepolicies/volume_filter_data.go new file mode 100644 index 000000000..1f816f8a5 --- /dev/null +++ b/vendor/github.com/vmware-tanzu/velero/internal/resourcepolicies/volume_filter_data.go @@ -0,0 +1,21 @@ +package resourcepolicies + +import ( + corev1api "k8s.io/api/core/v1" +) + +// VolumeFilterData bundles the volume data needed for volume policy filtering +type VolumeFilterData struct { + PersistentVolume *corev1api.PersistentVolume + PodVolume *corev1api.Volume + PVC *corev1api.PersistentVolumeClaim +} + +// NewVolumeFilterData constructs a new VolumeFilterData instance. +func NewVolumeFilterData(pv *corev1api.PersistentVolume, podVol *corev1api.Volume, pvc *corev1api.PersistentVolumeClaim) VolumeFilterData { + return VolumeFilterData{ + PersistentVolume: pv, + PodVolume: podVol, + PVC: pvc, + } +} diff --git a/vendor/github.com/vmware-tanzu/velero/internal/resourcepolicies/volume_resources.go b/vendor/github.com/vmware-tanzu/velero/internal/resourcepolicies/volume_resources.go new file mode 100644 index 000000000..4ad34f484 --- /dev/null +++ b/vendor/github.com/vmware-tanzu/velero/internal/resourcepolicies/volume_resources.go @@ -0,0 +1,302 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package resourcepolicies + +import ( + "bytes" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/labels" + + "github.com/pkg/errors" + "gopkg.in/yaml.v3" + corev1api "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" +) + +type volPolicy struct { + action Action + conditions []volumeCondition +} + +type volumeCondition interface { + match(v *structuredVolume) bool + validate() error +} + +// capacity consists of the lower and upper boundaries +type capacity struct { + lower resource.Quantity + upper resource.Quantity +} + +type structuredVolume struct { + capacity resource.Quantity + storageClass string + nfs *nFSVolumeSource + csi *csiVolumeSource + volumeType SupportedVolume + pvcLabels map[string]string + pvcPhase string +} + +func (s *structuredVolume) parsePV(pv *corev1api.PersistentVolume) { + s.capacity = *pv.Spec.Capacity.Storage() + s.storageClass = pv.Spec.StorageClassName + nfs := pv.Spec.NFS + if nfs != nil { + s.nfs = &nFSVolumeSource{Server: nfs.Server, Path: nfs.Path} + } + + csi := pv.Spec.CSI + if csi != nil { + s.csi = &csiVolumeSource{Driver: csi.Driver, VolumeAttributes: csi.VolumeAttributes} + } + + s.volumeType = getVolumeTypeFromPV(pv) +} + +func (s *structuredVolume) parsePVC(pvc *corev1api.PersistentVolumeClaim) { + if pvc != nil { + if len(pvc.GetLabels()) > 0 { + s.pvcLabels = pvc.Labels + } + s.pvcPhase = string(pvc.Status.Phase) + } +} + +func (s *structuredVolume) parsePodVolume(vol *corev1api.Volume) { + nfs := vol.NFS + if nfs != nil { + s.nfs = &nFSVolumeSource{Server: nfs.Server, Path: nfs.Path} + } + + csi := vol.CSI + if csi != nil { + s.csi = &csiVolumeSource{Driver: csi.Driver, VolumeAttributes: csi.VolumeAttributes} + } + + s.volumeType = getVolumeTypeFromVolume(vol) +} + +// pvcLabelsCondition defines a condition that matches if the PVC's labels contain all the provided key/value pairs. +type pvcLabelsCondition struct { + labels map[string]string +} + +func (c *pvcLabelsCondition) match(v *structuredVolume) bool { + // No labels specified: always match. + if len(c.labels) == 0 { + return true + } + if v.pvcLabels == nil { + return false + } + selector := labels.SelectorFromSet(c.labels) + return selector.Matches(labels.Set(v.pvcLabels)) +} + +func (c *pvcLabelsCondition) validate() error { + return nil +} + +// pvcPhaseCondition defines a condition that matches if the PVC's phase matches any of the provided phases. +type pvcPhaseCondition struct { + phases []string +} + +func (c *pvcPhaseCondition) match(v *structuredVolume) bool { + // No phases specified: always match.
+ if len(c.phases) == 0 { + return true + } + if v.pvcPhase == "" { + return false + } + for _, phase := range c.phases { + if v.pvcPhase == phase { + return true + } + } + return false +} + +func (c *pvcPhaseCondition) validate() error { + return nil +} + +type capacityCondition struct { + capacity capacity +} + +func (c *capacityCondition) match(v *structuredVolume) bool { + return c.capacity.isInRange(v.capacity) +} + +type storageClassCondition struct { + storageClass []string +} + +func (s *storageClassCondition) match(v *structuredVolume) bool { + if len(s.storageClass) == 0 { + return true + } + + if v.storageClass == "" { + return false + } + + for _, sc := range s.storageClass { + if v.storageClass == sc { + return true + } + } + + return false +} + +type nfsCondition struct { + nfs *nFSVolumeSource +} + +func (c *nfsCondition) match(v *structuredVolume) bool { + if c.nfs == nil { + return true + } + if v.nfs == nil { + return false + } + + if c.nfs.Path == "" { + if c.nfs.Server == "" { // match nfs: {} + return v.nfs != nil + } + if c.nfs.Server != v.nfs.Server { + return false + } + return true + } + if c.nfs.Path != v.nfs.Path { + return false + } + if c.nfs.Server == "" { + return true + } + if c.nfs.Server != v.nfs.Server { + return false + } + return true +} + +type csiCondition struct { + csi *csiVolumeSource +} + +func (c *csiCondition) match(v *structuredVolume) bool { + if c.csi == nil { + return true + } + + if c.csi.Driver == "" { // match csi: {} + return v.csi != nil + } + + if v.csi == nil { + return false + } + + if c.csi.Driver != v.csi.Driver { + return false + } + + if len(c.csi.VolumeAttributes) == 0 { + return true + } + + if len(v.csi.VolumeAttributes) == 0 { + return false + } + + for key, value := range c.csi.VolumeAttributes { + if value != v.csi.VolumeAttributes[key] { + return false + } + } + + return true +} + +// parseCapacity parses a "lower,upper" string into the capacity format +func parseCapacity(cap string) (*capacity, error) { + if cap == "" { + cap = "," + } + capacities := strings.Split(cap, ",") + var quantities []resource.Quantity + if len(capacities) != 2 { + return nil, fmt.Errorf("wrong format of Capacity %v", cap) + } + + for _, v := range capacities { + if strings.TrimSpace(v) == "" { + // case like "10Gi,": + // an empty side is assigned the zero quantity + quantities = append(quantities, *resource.NewQuantity(int64(0), resource.DecimalSI)) + } else { + quantity, err := resource.ParseQuantity(strings.TrimSpace(v)) + if err != nil { + return nil, fmt.Errorf("wrong format of Capacity %v with err %v", v, err) + } + quantities = append(quantities, quantity) + } + } + + return &capacity{lower: quantities[0], upper: quantities[1]}, nil +} + +// isInRange returns true if the quantity y falls within the capacity range, false otherwise +func (c *capacity) isInRange(y resource.Quantity) bool { + if c.lower.IsZero() && c.upper.Cmp(y) >= 0 { + // [0, a] y + return true + } + if c.upper.IsZero() && c.lower.Cmp(y) <= 0 { + // [b, 0] y + return true + } + if !c.lower.IsZero() && !c.upper.IsZero() { + // [a, b] y + return c.lower.Cmp(y) <= 0 && c.upper.Cmp(y) >= 0 + } + return false +} +
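A short worked illustration of the parsing and range rules above, kept as comments so it stays clearly apart from the vendored sources (quantities built with k8s.io/apimachinery's resource.MustParse):

// c, _ := parseCapacity("10Gi,")            // lower=10Gi, upper=0  => matches any y >= 10Gi
// c.isInRange(resource.MustParse("20Gi"))   // true
// c.isInRange(resource.MustParse("5Gi"))    // false
// c, _ = parseCapacity(",100Gi")            // lower=0, upper=100Gi => matches any y <= 100Gi
// c.isInRange(resource.MustParse("200Gi"))  // false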
+// unmarshalVolConditions parses a map[string]any into the volumeConditions format +// and validates the key fields of the map. +func unmarshalVolConditions(con map[string]any) (*volumeConditions, error) { + volConditions := &volumeConditions{} + buffer := new(bytes.Buffer) + err := yaml.NewEncoder(buffer).Encode(con) + if err != nil { + return nil, errors.Wrap(err, "failed to encode volume conditions") + } + + if err := decodeStruct(buffer, volConditions); err != nil { + return nil, errors.Wrap(err, "failed to decode volume conditions") + } + return volConditions, nil +} diff --git a/vendor/github.com/vmware-tanzu/velero/internal/resourcepolicies/volume_resources_validator.go b/vendor/github.com/vmware-tanzu/velero/internal/resourcepolicies/volume_resources_validator.go new file mode 100644 index 000000000..b6031eec2 --- /dev/null +++ b/vendor/github.com/vmware-tanzu/velero/internal/resourcepolicies/volume_resources_validator.go @@ -0,0 +1,102 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package resourcepolicies + +import ( + "fmt" + "io" + + "github.com/pkg/errors" + "gopkg.in/yaml.v3" +) + +const currentSupportDataVersion = "v1" + +type csiVolumeSource struct { + Driver string `yaml:"driver,omitempty"` + // CSI volume attributes + VolumeAttributes map[string]string `yaml:"volumeAttributes,omitempty"` +} + +type nFSVolumeSource struct { + // Server is the hostname or IP address of the NFS server + Server string `yaml:"server,omitempty"` + // Path is the exported NFS share + Path string `yaml:"path,omitempty"` +} + +// volumeConditions defines the current format of the conditions we parse +type volumeConditions struct { + Capacity string `yaml:"capacity,omitempty"` + StorageClass []string `yaml:"storageClass,omitempty"` + NFS *nFSVolumeSource `yaml:"nfs,omitempty"` + CSI *csiVolumeSource `yaml:"csi,omitempty"` + VolumeTypes []SupportedVolume `yaml:"volumeTypes,omitempty"` + PVCLabels map[string]string `yaml:"pvcLabels,omitempty"` + PVCPhase []string `yaml:"pvcPhase,omitempty"` +} + +func (c *capacityCondition) validate() error { + // [0, a] + // [a, b] + // [b, 0] + // ==> lower <= upper, or upper is zero + if (c.capacity.upper.Cmp(c.capacity.lower) >= 0) || + (!c.capacity.lower.IsZero() && c.capacity.upper.IsZero()) { + return nil + } + return errors.Errorf("illegal values for capacity %v", c.capacity) +} + +func (s *storageClassCondition) validate() error { + // validate by yamlv3 + return nil +} + +func (c *nfsCondition) validate() error { + // validate by yamlv3 + return nil +} + +func (c *csiCondition) validate() error { + if c != nil && c.csi != nil && c.csi.Driver == "" && c.csi.VolumeAttributes != nil { + return errors.New("csi driver should not be empty when filtering by volume attributes") + } + + return nil +} + +// decodeStruct strictly validates that keys in decoded mappings exist as fields in the struct being decoded into +func decodeStruct(r io.Reader, s any) error { + dec := yaml.NewDecoder(r) + dec.KnownFields(true) + return dec.Decode(s) +} + +// validate checks the action format +func (a *Action) validate() error { + // validate Type + valid := false + if a.Type == Skip || a.Type ==
Snapshot || a.Type == FSBackup { + valid = true + } + if !valid { + return fmt.Errorf("invalid action type %s", a.Type) + } + + // TODO validate parameters + return nil +} diff --git a/vendor/github.com/vmware-tanzu/velero/internal/resourcepolicies/volume_types_conditions.go b/vendor/github.com/vmware-tanzu/velero/internal/resourcepolicies/volume_types_conditions.go new file mode 100644 index 000000000..0ee57166b --- /dev/null +++ b/vendor/github.com/vmware-tanzu/velero/internal/resourcepolicies/volume_types_conditions.go @@ -0,0 +1,247 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resourcepolicies + +import ( + corev1api "k8s.io/api/core/v1" +) + +type volumeTypeCondition struct { + volumeTypes []SupportedVolume +} + +type SupportedVolume string + +const ( + AWSElasticBlockStore SupportedVolume = "awsElasticBlockStore" + AzureDisk SupportedVolume = "azureDisk" + AzureFile SupportedVolume = "azureFile" + Cinder SupportedVolume = "cinder" + CephFS SupportedVolume = "cephfs" + ConfigMap SupportedVolume = "configMap" + CSI SupportedVolume = "csi" + DownwardAPI SupportedVolume = "downwardAPI" + EmptyDir SupportedVolume = "emptyDir" + Ephemeral SupportedVolume = "ephemeral" + FC SupportedVolume = "fc" + Flocker SupportedVolume = "flocker" + FlexVolume SupportedVolume = "flexVolume" + GitRepo SupportedVolume = "gitRepo" + Glusterfs SupportedVolume = "glusterfs" + GCEPersistentDisk SupportedVolume = "gcePersistentDisk" + HostPath SupportedVolume = "hostPath" + ISCSI SupportedVolume = "iscsi" + Local SupportedVolume = "local" + NFS SupportedVolume = "nfs" + PhotonPersistentDisk SupportedVolume = "photonPersistentDisk" + PortworxVolume SupportedVolume = "portworxVolume" + Projected SupportedVolume = "projected" + Quobyte SupportedVolume = "quobyte" + RBD SupportedVolume = "rbd" + ScaleIO SupportedVolume = "scaleIO" + Secret SupportedVolume = "secret" + StorageOS SupportedVolume = "storageOS" + VsphereVolume SupportedVolume = "vsphereVolume" +) + +func (v *volumeTypeCondition) match(s *structuredVolume) bool { + if len(v.volumeTypes) == 0 { + return true + } + + for _, vt := range v.volumeTypes { + if vt == s.volumeType { + return true + } + } + return false +} + +func (v *volumeTypeCondition) validate() error { + // validate by yamlv3 + return nil +} + +func getVolumeTypeFromPV(pv *corev1api.PersistentVolume) SupportedVolume { + if pv == nil { + return "" + } + + if pv.Spec.AWSElasticBlockStore != nil { + return AWSElasticBlockStore + } + if pv.Spec.AzureDisk != nil { + return AzureDisk + } + if pv.Spec.AzureFile != nil { + return AzureFile + } + if pv.Spec.CephFS != nil { + return CephFS + } + if pv.Spec.Cinder != nil { + return Cinder + } + if pv.Spec.CSI != nil { + return CSI + } + if pv.Spec.FC != nil { + return FC + } + if pv.Spec.Flocker != nil { + return Flocker + } + if pv.Spec.FlexVolume != nil { + return FlexVolume + } + if pv.Spec.GCEPersistentDisk != nil { + return GCEPersistentDisk + } + if
pv.Spec.Glusterfs != nil { + return Glusterfs + } + if pv.Spec.HostPath != nil { + return HostPath + } + if pv.Spec.ISCSI != nil { + return ISCSI + } + if pv.Spec.Local != nil { + return Local + } + if pv.Spec.NFS != nil { + return NFS + } + if pv.Spec.PhotonPersistentDisk != nil { + return PhotonPersistentDisk + } + if pv.Spec.PortworxVolume != nil { + return PortworxVolume + } + if pv.Spec.Quobyte != nil { + return Quobyte + } + if pv.Spec.RBD != nil { + return RBD + } + if pv.Spec.ScaleIO != nil { + return ScaleIO + } + if pv.Spec.StorageOS != nil { + return StorageOS + } + if pv.Spec.VsphereVolume != nil { + return VsphereVolume + } + return "" +} + +func getVolumeTypeFromVolume(vol *corev1api.Volume) SupportedVolume { + if vol == nil { + return "" + } + + if vol.AWSElasticBlockStore != nil { + return AWSElasticBlockStore + } + if vol.AzureDisk != nil { + return AzureDisk + } + if vol.AzureFile != nil { + return AzureFile + } + if vol.CephFS != nil { + return CephFS + } + if vol.Cinder != nil { + return Cinder + } + if vol.CSI != nil { + return CSI + } + if vol.FC != nil { + return FC + } + if vol.Flocker != nil { + return Flocker + } + if vol.FlexVolume != nil { + return FlexVolume + } + if vol.GCEPersistentDisk != nil { + return GCEPersistentDisk + } + if vol.GitRepo != nil { + return GitRepo + } + if vol.Glusterfs != nil { + return Glusterfs + } + if vol.ISCSI != nil { + return ISCSI + } + if vol.NFS != nil { + return NFS + } + if vol.Secret != nil { + return Secret + } + if vol.RBD != nil { + return RBD + } + if vol.DownwardAPI != nil { + return DownwardAPI + } + if vol.ConfigMap != nil { + return ConfigMap + } + if vol.Projected != nil { + return Projected + } + if vol.Ephemeral != nil { + return Ephemeral + } + if vol.PhotonPersistentDisk != nil { + return PhotonPersistentDisk + } + if vol.PortworxVolume != nil { + return PortworxVolume + } + if vol.Quobyte != nil { + return Quobyte + } + if vol.ScaleIO != nil { + return ScaleIO + } + if vol.StorageOS != nil { + return StorageOS + } + if vol.VsphereVolume != nil { + return VsphereVolume + } + if vol.HostPath != nil { + return HostPath + } + if vol.EmptyDir != nil { + return EmptyDir + } + return "" +} diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/backup_repository_types.go b/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/backup_repository_types.go index fd30b0452..621ffcfcc 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/backup_repository_types.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/backup_repository_types.go @@ -36,8 +36,9 @@ type BackupRepositorySpec struct { RepositoryType string `json:"repositoryType"` // ResticIdentifier is the full restic-compatible string for identifying - // this repository. - ResticIdentifier string `json:"resticIdentifier"` + // this repository. This field is only used when RepositoryType is "restic". + // +optional + ResticIdentifier string `json:"resticIdentifier,omitempty"` // MaintenanceFrequency is how often maintenance should be run.
MaintenanceFrequency metav1.Duration `json:"maintenanceFrequency"` diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/backup_types.go b/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/backup_types.go index 858894dc7..24af8132d 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/backup_types.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/backup_types.go @@ -17,7 +17,7 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1api "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -113,6 +113,10 @@ type BackupSpec struct { // +optional TTL metav1.Duration `json:"ttl,omitempty"` + // VolumeGroupSnapshotLabelKey specifies the label key to group PVCs under a VGS. + // +optional + VolumeGroupSnapshotLabelKey string `json:"volumeGroupSnapshotLabelKey,omitempty"` + // IncludeClusterResources specifies whether cluster-scoped resources // should be included for consideration in the backup. // +optional @@ -164,7 +168,7 @@ type BackupSpec struct { ItemOperationTimeout metav1.Duration `json:"itemOperationTimeout,omitempty"` // ResourcePolicy specifies the referenced resource policies that backup should follow // +optional - ResourcePolicy *v1.TypedLocalObjectReference `json:"resourcePolicy,omitempty"` + ResourcePolicy *corev1api.TypedLocalObjectReference `json:"resourcePolicy,omitempty"` // SnapshotMoveData specifies whether snapshot data should be moved // +optional @@ -284,7 +288,7 @@ const ( // BackupPhase is a string representation of the lifecycle phase // of a Velero backup. -// +kubebuilder:validation:Enum=New;FailedValidation;InProgress;WaitingForPluginOperations;WaitingForPluginOperationsPartiallyFailed;Finalizing;FinalizingPartiallyFailed;Completed;PartiallyFailed;Failed;Deleting +// +kubebuilder:validation:Enum=New;Queued;ReadyToStart;FailedValidation;InProgress;WaitingForPluginOperations;WaitingForPluginOperationsPartiallyFailed;Finalizing;FinalizingPartiallyFailed;Completed;PartiallyFailed;Failed;Deleting type BackupPhase string const ( @@ -292,6 +296,12 @@ const ( // yet processed by the BackupController. BackupPhaseNew BackupPhase = "New" + // BackupPhaseQueued means the backup has been added to the queue and is waiting to be dequeued. + BackupPhaseQueued BackupPhase = "Queued" + + // BackupPhaseReadyToStart means the backup has been pulled from the queue and is ready to start. + BackupPhaseReadyToStart BackupPhase = "ReadyToStart" + // BackupPhaseFailedValidation means the backup has failed // the controller's validations and therefore will not run. BackupPhaseFailedValidation BackupPhase = "FailedValidation" @@ -367,6 +377,11 @@ type BackupStatus struct { // +optional Phase BackupPhase `json:"phase,omitempty"` + // QueuePosition is the position of the backup in the queue. + // Only relevant when Phase is "Queued". + // +optional + QueuePosition int `json:"queuePosition,omitempty"` + // ValidationErrors is a slice of all validation errors (if // applicable). // +optional diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/backupstoragelocation_types.go b/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/backupstoragelocation_types.go index e44671222..4472cf271 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/backupstoragelocation_types.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/backupstoragelocation_types.go @@ -17,6 +17,8 @@ limitations under the License.
package v1 import ( + "errors" + corev1api "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -146,8 +148,15 @@ type ObjectStorageLocation struct { Prefix string `json:"prefix,omitempty"` // CACert defines a CA bundle to use when verifying TLS connections to the provider. + // Deprecated: Use CACertRef instead. // +optional CACert []byte `json:"caCert,omitempty"` + + // CACertRef is a reference to a Secret containing the CA certificate bundle to use + // when verifying TLS connections to the provider. The Secret must be in the same + // namespace as the BackupStorageLocation. + // +optional + CACertRef *corev1api.SecretKeySelector `json:"caCertRef,omitempty"` } // BackupStorageLocationPhase is the lifecycle phase of a Velero BackupStorageLocation. @@ -177,3 +186,13 @@ const ( // TODO(2.0): remove the AccessMode field from BackupStorageLocationStatus. // TODO(2.0): remove the LastSyncedRevision field from BackupStorageLocationStatus. + +// Validate validates the BackupStorageLocation to ensure that only one of CACert or CACertRef is set. +func (bsl *BackupStorageLocation) Validate() error { + if bsl.Spec.ObjectStorage != nil && + bsl.Spec.ObjectStorage.CACert != nil && + bsl.Spec.ObjectStorage.CACertRef != nil { + return errors.New("cannot specify both caCert and caCertRef in objectStorage") + } + return nil +} diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/labels_annotations.go b/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/labels_annotations.go index c86b4e91b..85d8b05aa 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/labels_annotations.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/labels_annotations.go @@ -101,6 +101,24 @@ const ( // ExcludeFromBackupLabel is the label to exclude k8s resource from backup, // even if the resource contains a matching selector label. ExcludeFromBackupLabel = "velero.io/exclude-from-backup" + + // SkipFromBackupAnnotation is the annotation used by internal BackupItemActions + // to indicate that a resource should be skipped from backup, + // even if it doesn't have the ExcludeFromBackupLabel. + // This is used in cases where we want to skip backup of a resource based on some logic in a plugin. + // + // Note: SkipFromBackupAnnotation's priority is higher than MustIncludeAdditionalItemAnnotation. + // If SkipFromBackupAnnotation is set, the resource will be skipped even if MustIncludeAdditionalItemAnnotation is set. + SkipFromBackupAnnotation = "velero.io/skip-from-backup" + + // DefaultVGSLabelKey is the default label key used to group PVCs under a VolumeGroupSnapshot + DefaultVGSLabelKey = "velero.io/volume-group" + + // PVBLabel is the label key used to identify the PVB on the PVB pod + PVBLabel = "velero.io/pod-volume-backup" + + // PVRLabel is the label key used to identify the PVR on the PVR pod + PVRLabel = "velero.io/pod-volume-restore" ) type AsyncOperationIDPrefix string @@ -127,6 +145,9 @@ const ( VolumeSnapshotClassDriverBackupAnnotationPrefix = "velero.io/csi-volumesnapshot-class" VolumeSnapshotClassDriverPVCAnnotation = "velero.io/csi-volumesnapshot-class" + // https://kubernetes.io/zh-cn/docs/concepts/storage/volume-snapshot-classes/ + VolumeSnapshotClassKubernetesAnnotation = "snapshot.storage.kubernetes.io/is-default-class" + // There is no release w/ these constants exported. Using the strings for now.
// CSI Annotation volumesnapshotclass // https://github.com/kubernetes-csi/external-snapshotter/blob/master/pkg/utils/util.go#L59-L60 @@ -148,4 +169,13 @@ ( // DataUploadNameAnnotation is the label key for the DataUpload name DataUploadNameAnnotation = "velero.io/data-upload-name" + + // Label used on VolumeGroupSnapshotClass to mark it as Velero's default for a CSI driver + VolumeGroupSnapshotClassDefaultLabel = "velero.io/csi-volumegroupsnapshot-class" + + // Annotation on PVC to override the VGS class to use + VolumeGroupSnapshotClassAnnotationPVC = "velero.io/csi-volume-group-snapshot-class" + + // Annotation prefix on Backup to override VGS class per CSI driver + VolumeGroupSnapshotClassAnnotationBackupPrefix = "velero.io/csi-volumegroupsnapshot-class_" ) diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/pod_volume_backup_types.go b/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/pod_volume_backup_types.go index b3070e3dd..b246906fb 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/pod_volume_backup_types.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/pod_volume_backup_types.go @@ -57,15 +57,23 @@ type PodVolumeBackupSpec struct { // +optional // +nullable UploaderSettings map[string]string `json:"uploaderSettings,omitempty"` + + // Cancel indicates a request to cancel the ongoing PodVolumeBackup. It can be set + // when the PodVolumeBackup is in the InProgress phase. + Cancel bool `json:"cancel,omitempty"` } // PodVolumeBackupPhase represents the lifecycle phase of a PodVolumeBackup. -// +kubebuilder:validation:Enum=New;InProgress;Completed;Failed +// +kubebuilder:validation:Enum=New;Accepted;Prepared;InProgress;Canceling;Canceled;Completed;Failed type PodVolumeBackupPhase string const ( PodVolumeBackupPhaseNew PodVolumeBackupPhase = "New" + PodVolumeBackupPhaseAccepted PodVolumeBackupPhase = "Accepted" + PodVolumeBackupPhasePrepared PodVolumeBackupPhase = "Prepared" PodVolumeBackupPhaseInProgress PodVolumeBackupPhase = "InProgress" + PodVolumeBackupPhaseCanceling PodVolumeBackupPhase = "Canceling" + PodVolumeBackupPhaseCanceled PodVolumeBackupPhase = "Canceled" PodVolumeBackupPhaseCompleted PodVolumeBackupPhase = "Completed" PodVolumeBackupPhaseFailed PodVolumeBackupPhase = "Failed" ) @@ -109,20 +117,32 @@ type PodVolumeBackupStatus struct { // about the backup operation. // +optional Progress shared.DataMoveOperationProgress `json:"progress,omitempty"` + + // IncrementalBytes holds the number of bytes new or changed since the last backup + // +optional + IncrementalBytes int64 `json:"incrementalBytes,omitempty"` + + // AcceptedTimestamp records the time the pod volume backup is to be prepared. + // The server's time is used for AcceptedTimestamp. + // +optional + // +nullable + AcceptedTimestamp *metav1.Time `json:"acceptedTimestamp,omitempty"` } // TODO(2.0) After converting all resources to use the runtime-controller client, // the genclient and k8s:deepcopy markers will no longer be needed and should be removed.
// +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase",description="Pod Volume Backup status such as New/InProgress" -// +kubebuilder:printcolumn:name="Created",type="date",JSONPath=".status.startTimestamp",description="Time when this backup was started" -// +kubebuilder:printcolumn:name="Namespace",type="string",JSONPath=".spec.pod.namespace",description="Namespace of the pod containing the volume to be backed up" -// +kubebuilder:printcolumn:name="Pod",type="string",JSONPath=".spec.pod.name",description="Name of the pod containing the volume to be backed up" -// +kubebuilder:printcolumn:name="Volume",type="string",JSONPath=".spec.volume",description="Name of the volume to be backed up" -// +kubebuilder:printcolumn:name="Uploader Type",type="string",JSONPath=".spec.uploaderType",description="The type of the uploader to handle data transfer" +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase",description="PodVolumeBackup status such as New/InProgress" +// +kubebuilder:printcolumn:name="Started",type="date",JSONPath=".status.startTimestamp",description="Time duration since this PodVolumeBackup was started" +// +kubebuilder:printcolumn:name="Bytes Done",type="integer",format="int64",JSONPath=".status.progress.bytesDone",description="Completed bytes" +// +kubebuilder:printcolumn:name="Total Bytes",type="integer",format="int64",JSONPath=".status.progress.totalBytes",description="Total bytes" +// +kubebuilder:printcolumn:name="Incremental Bytes",type="integer",format="int64",JSONPath=".status.incrementalBytes",description="Incremental bytes",priority=10 // +kubebuilder:printcolumn:name="Storage Location",type="string",JSONPath=".spec.backupStorageLocation",description="Name of the Backup Storage Location where this backup should be stored" -// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since this PodVolumeBackup was created" +// +kubebuilder:printcolumn:name="Node",type="string",JSONPath=".status.node",description="Name of the node where the PodVolumeBackup is processed" +// +kubebuilder:printcolumn:name="Uploader",type="string",JSONPath=".spec.uploaderType",description="The type of the uploader to handle data transfer" // +kubebuilder:object:root=true // +kubebuilder:object:generate=true diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/pod_volume_restore_type.go b/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/pod_volume_restore_type.go index 34bc7e530..c1d75b71c 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/pod_volume_restore_type.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/pod_volume_restore_type.go @@ -54,15 +54,27 @@ type PodVolumeRestoreSpec struct { // +optional // +nullable UploaderSettings map[string]string `json:"uploaderSettings,omitempty"` + + // Cancel indicates a request to cancel the ongoing PodVolumeRestore. It can be set + // when the PodVolumeRestore is in the InProgress phase. + Cancel bool `json:"cancel,omitempty"` + + // SnapshotSize is the logical size in bytes of the snapshot. + // +optional + SnapshotSize int64 `json:"snapshotSize,omitempty"` } // PodVolumeRestorePhase represents the lifecycle phase of a PodVolumeRestore.
-// +kubebuilder:validation:Enum=New;InProgress;Completed;Failed +// +kubebuilder:validation:Enum=New;Accepted;Prepared;InProgress;Canceling;Canceled;Completed;Failed type PodVolumeRestorePhase string const ( PodVolumeRestorePhaseNew PodVolumeRestorePhase = "New" + PodVolumeRestorePhaseAccepted PodVolumeRestorePhase = "Accepted" + PodVolumeRestorePhasePrepared PodVolumeRestorePhase = "Prepared" PodVolumeRestorePhaseInProgress PodVolumeRestorePhase = "InProgress" + PodVolumeRestorePhaseCanceling PodVolumeRestorePhase = "Canceling" + PodVolumeRestorePhaseCanceled PodVolumeRestorePhase = "Canceled" PodVolumeRestorePhaseCompleted PodVolumeRestorePhase = "Completed" PodVolumeRestorePhaseFailed PodVolumeRestorePhase = "Failed" ) @@ -95,6 +107,16 @@ type PodVolumeRestoreStatus struct { // about the restore operation. // +optional Progress shared.DataMoveOperationProgress `json:"progress,omitempty"` + + // AcceptedTimestamp records the time the pod volume restore is to be prepared. + // The server's time is used for AcceptedTimestamp. + // +optional + // +nullable + AcceptedTimestamp *metav1.Time `json:"acceptedTimestamp,omitempty"` + + // Node is the name of the node where the pod volume restore is processed. + // +optional + Node string `json:"node,omitempty"` } // TODO(2.0) After converting all resources to use the runtime-controller client, the genclient and k8s:deepcopy markers will no longer be needed and should be removed. @@ -103,14 +125,14 @@ type PodVolumeRestoreStatus struct { // +kubebuilder:object:generate=true // +kubebuilder:object:root=true // +kubebuilder:storageversion -// +kubebuilder:printcolumn:name="Namespace",type="string",JSONPath=".spec.pod.namespace",description="Namespace of the pod containing the volume to be restored" -// +kubebuilder:printcolumn:name="Pod",type="string",JSONPath=".spec.pod.name",description="Name of the pod containing the volume to be restored" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase",description="PodVolumeRestore status such as New/InProgress" +// +kubebuilder:printcolumn:name="Started",type="date",JSONPath=".status.startTimestamp",description="Time duration since this PodVolumeRestore was started" +// +kubebuilder:printcolumn:name="Bytes Done",type="integer",format="int64",JSONPath=".status.progress.bytesDone",description="Completed bytes" +// +kubebuilder:printcolumn:name="Total Bytes",type="integer",format="int64",JSONPath=".status.progress.totalBytes",description="Total bytes" +// +kubebuilder:printcolumn:name="Storage Location",type="string",JSONPath=".spec.backupStorageLocation",description="Name of the Backup Storage Location where the backup data is stored" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since this PodVolumeRestore was created" +// +kubebuilder:printcolumn:name="Node",type="string",JSONPath=".status.node",description="Name of the node where the PodVolumeRestore is processed" // +kubebuilder:printcolumn:name="Uploader Type",type="string",JSONPath=".spec.uploaderType",description="The type of the uploader to handle data transfer" -// +kubebuilder:printcolumn:name="Volume",type="string",JSONPath=".spec.volume",description="Name of the volume to be restored" -// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase",description="Pod Volume Restore status such as New/InProgress" -// 
+kubebuilder:printcolumn:name="TotalBytes",type="integer",format="int64",JSONPath=".status.progress.totalBytes",description="Pod Volume Restore status such as New/InProgress" -// +kubebuilder:printcolumn:name="BytesDone",type="integer",format="int64",JSONPath=".status.progress.bytesDone",description="Pod Volume Restore status such as New/InProgress" -// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" type PodVolumeRestore struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/restore_types.go b/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/restore_types.go index 377a92737..5dd99edb7 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/restore_types.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/restore_types.go @@ -17,7 +17,7 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1api "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) @@ -123,7 +123,7 @@ type RestoreSpec struct { // ResourceModifier specifies the reference to JSON resource patches that should be applied to resources before restoration. // +optional // +nullable - ResourceModifier *v1.TypedLocalObjectReference `json:"resourceModifier,omitempty"` + ResourceModifier *corev1api.TypedLocalObjectReference `json:"resourceModifier,omitempty"` // UploaderConfig specifies the configuration for the restore. // +optional diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/zz_generated.deepcopy.go b/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/zz_generated.deepcopy.go index 2fd13cfd2..0702f8623 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/zz_generated.deepcopy.go @@ -915,6 +915,11 @@ func (in *ObjectStorageLocation) DeepCopyInto(out *ObjectStorageLocation) { *out = make([]byte, len(*in)) copy(*out, *in) } + if in.CACertRef != nil { + in, out := &in.CACertRef, &out.CACertRef + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageLocation. @@ -1043,6 +1048,10 @@ func (in *PodVolumeBackupStatus) DeepCopyInto(out *PodVolumeBackupStatus) { *out = (*in).DeepCopy() } out.Progress = in.Progress + if in.AcceptedTimestamp != nil { + in, out := &in.AcceptedTimestamp, &out.AcceptedTimestamp + *out = (*in).DeepCopy() + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeBackupStatus. @@ -1149,6 +1158,10 @@ func (in *PodVolumeRestoreStatus) DeepCopyInto(out *PodVolumeRestoreStatus) { *out = (*in).DeepCopy() } out.Progress = in.Progress + if in.AcceptedTimestamp != nil { + in, out := &in.AcceptedTimestamp, &out.AcceptedTimestamp + *out = (*in).DeepCopy() + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeRestoreStatus. 
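The CACertRef field and Validate guard introduced above change how CA bundles reach the BackupStorageLocation; a minimal usage sketch under assumed names (the "velero", "default", "backups", and "bsl-ca" identifiers are hypothetical):

package example

import (
	corev1api "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)

// buildBSL assembles a BackupStorageLocation that supplies its CA bundle via the
// new caCertRef Secret reference, then runs the Validate guard added above.
func buildBSL() (*velerov1api.BackupStorageLocation, error) {
	bsl := &velerov1api.BackupStorageLocation{
		ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "velero"}, // hypothetical names
		Spec: velerov1api.BackupStorageLocationSpec{
			Provider: "aws", // hypothetical provider
			StorageType: velerov1api.StorageType{
				ObjectStorage: &velerov1api.ObjectStorageLocation{
					Bucket: "backups", // hypothetical bucket
					// Reference the CA bundle through a Secret rather than the deprecated inline caCert.
					CACertRef: &corev1api.SecretKeySelector{
						LocalObjectReference: corev1api.LocalObjectReference{Name: "bsl-ca"},
						Key:                  "ca.crt",
					},
				},
			},
		},
	}
	// Validate rejects specs that set both caCert and caCertRef.
	if err := bsl.Validate(); err != nil {
		return nil, err
	}
	return bsl, nil
}

Keeping the bundle in a Secret lets the certificate be rotated without patching the BackupStorageLocation spec itself.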
diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1/data_download_types.go b/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1/data_download_types.go index f79af10a8..4ea7128ec 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1/data_download_types.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1/data_download_types.go @@ -58,6 +58,10 @@ type DataDownloadSpec struct { // NodeOS is OS of the node where the DataDownload is processed. // +optional NodeOS NodeOS `json:"nodeOS,omitempty"` + + // SnapshotSize is the logical size in bytes of the snapshot. + // +optional + SnapshotSize int64 `json:"snapshotSize,omitempty"` } // TargetVolumeSpec is the specification for a target PVC. diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1/data_upload_types.go b/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1/data_upload_types.go index 140a94ec3..751da4555 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1/data_upload_types.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1/data_upload_types.go @@ -79,6 +79,10 @@ type CSISnapshotSpec struct { // SnapshotClass is the name of the snapshot class that the volume snapshot is created with // +optional SnapshotClass string `json:"snapshotClass"` + + // Driver is the driver used by the VolumeSnapshotContent + // +optional + Driver string `json:"driver,omitempty"` } // DataUploadPhase represents the lifecycle phase of a DataUpload. @@ -151,6 +155,10 @@ type DataUploadStatus struct { // +optional Progress shared.DataMoveOperationProgress `json:"progress,omitempty"` + // IncrementalBytes holds the number of bytes new or changed since the last backup + // +optional + IncrementalBytes int64 `json:"incrementalBytes,omitempty"` + // Node is name of the node where the DataUpload is processed. // +optional Node string `json:"node,omitempty"` @@ -181,6 +189,7 @@ type DataUploadStatus struct { // +kubebuilder:printcolumn:name="Started",type="date",JSONPath=".status.startTimestamp",description="Time duration since this DataUpload was started" // +kubebuilder:printcolumn:name="Bytes Done",type="integer",format="int64",JSONPath=".status.progress.bytesDone",description="Completed bytes" // +kubebuilder:printcolumn:name="Total Bytes",type="integer",format="int64",JSONPath=".status.progress.totalBytes",description="Total bytes" +// +kubebuilder:printcolumn:name="Incremental Bytes",type="integer",format="int64",JSONPath=".status.incrementalBytes",description="Incremental bytes",priority=10 // +kubebuilder:printcolumn:name="Storage Location",type="string",JSONPath=".spec.backupStorageLocation",description="Name of the Backup Storage Location where this backup should be stored" // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since this DataUpload was created" // +kubebuilder:printcolumn:name="Node",type="string",JSONPath=".status.node",description="Name of the node where the DataUpload is processed" @@ -240,4 +249,8 @@ type DataUploadResult struct { // NodeOS is OS of the node where the DataUpload is processed. // +optional NodeOS NodeOS `json:"nodeOS,omitempty"` + + // SnapshotSize is the logical size in bytes of the snapshot.
+ // +optional + SnapshotSize int64 `json:"snapshotSize,omitempty"` } diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/client/dynamic.go b/vendor/github.com/vmware-tanzu/velero/pkg/client/dynamic.go index 705c28aaa..d38ca774c 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/client/dynamic.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/client/dynamic.go @@ -102,6 +102,11 @@ type StatusUpdater interface { UpdateStatus(obj *unstructured.Unstructured, opts metav1.UpdateOptions) (*unstructured.Unstructured, error) } +// Applier applies changes to an object using server-side apply +type Applier interface { + Apply(name string, obj *unstructured.Unstructured, opts metav1.ApplyOptions) (*unstructured.Unstructured, error) +} + // Dynamic contains client methods that Velero needs for backing up and restoring resources. type Dynamic interface { Creator @@ -111,6 +116,7 @@ type Dynamic interface { Patcher Deletor StatusUpdater + Applier } // dynamicResourceClient implements Dynamic. @@ -136,6 +142,10 @@ func (d *dynamicResourceClient) Get(name string, opts metav1.GetOptions) (*unstr return d.resourceClient.Get(context.TODO(), name, opts) } +func (d *dynamicResourceClient) Apply(name string, obj *unstructured.Unstructured, opts metav1.ApplyOptions) (*unstructured.Unstructured, error) { + return d.resourceClient.Apply(context.TODO(), name, obj, opts) +} + func (d *dynamicResourceClient) Patch(name string, data []byte) (*unstructured.Unstructured, error) { return d.resourceClient.Patch(context.TODO(), name, types.MergePatchType, data, metav1.PatchOptions{}) } diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/client/factory.go b/vendor/github.com/vmware-tanzu/velero/pkg/client/factory.go index f1524a583..4b3c8941e 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/client/factory.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/client/factory.go @@ -19,13 +19,15 @@ package client import ( "os" + volumegroupsnapshotv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1" + apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" "k8s.io/client-go/discovery" k8scheme "k8s.io/client-go/kubernetes/scheme" kbclient "sigs.k8s.io/controller-runtime/pkg/client" - snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1" + snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" "github.com/pkg/errors" "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/runtime" @@ -166,6 +168,9 @@ func (f *factory) KubebuilderClient() (kbclient.Client, error) { if err := snapshotv1api.AddToScheme(scheme); err != nil { return nil, err } + if err := volumegroupsnapshotv1beta1.AddToScheme(scheme); err != nil { + return nil, err + } kubebuilderClient, err := kbclient.New(clientConfig, kbclient.Options{ Scheme: scheme, }) @@ -202,6 +207,9 @@ func (f *factory) KubebuilderWatchClient() (kbclient.WithWatch, error) { if err := snapshotv1api.AddToScheme(scheme); err != nil { return nil, err } + if err := volumegroupsnapshotv1beta1.AddToScheme(scheme); err != nil { + return nil, err + } kubebuilderWatchClient, err := kbclient.NewWithWatch(clientConfig, kbclient.Options{ Scheme: scheme, }) diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/cmd/server/config/config.go b/vendor/github.com/vmware-tanzu/velero/pkg/cmd/server/config/config.go index 33a40a3c4..e19086217 100644 --- 
a/vendor/github.com/vmware-tanzu/velero/pkg/cmd/server/config/config.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/cmd/server/config/config.go @@ -5,15 +5,17 @@ import ( "strings" "time" + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/sirupsen/logrus" "github.com/spf13/pflag" + "github.com/vmware-tanzu/velero/internal/credentials" "github.com/vmware-tanzu/velero/pkg/cmd/util/flag" "github.com/vmware-tanzu/velero/pkg/constant" podvolumeconfigs "github.com/vmware-tanzu/velero/pkg/podvolume/configs" "github.com/vmware-tanzu/velero/pkg/types" "github.com/vmware-tanzu/velero/pkg/uploader" - "github.com/vmware-tanzu/velero/pkg/util/kube" "github.com/vmware-tanzu/velero/pkg/util/logging" ) @@ -44,22 +46,14 @@ const ( defaultMaxConcurrentK8SConnections = 30 defaultDisableInformerCache = false - // defaultCredentialsDirectory is the path on disk where credential - // files will be written to - defaultCredentialsDirectory = "/tmp/credentials" - - DefaultKeepLatestMaintenanceJobs = 3 - DefaultMaintenanceJobCPURequest = "0" - DefaultMaintenanceJobCPULimit = "0" - DefaultMaintenanceJobMemRequest = "0" - DefaultMaintenanceJobMemLimit = "0" - DefaultItemBlockWorkerCount = 1 + DefaultConcurrentBackups = 1 ) var ( // DisableableControllers is a list of controllers that can be disabled DisableableControllers = []string{ + constant.ControllerBackupQueue, constant.ControllerBackup, constant.ControllerBackupOperations, constant.ControllerBackupDeletion, @@ -121,10 +115,15 @@ var ( "datauploads.velero.io", "persistentvolumes", "persistentvolumeclaims", + "clusterroles", + "roles", "serviceaccounts", + "clusterrolebindings", + "rolebindings", "secrets", "configmaps", "limitranges", + "priorityclasses", "pods", // we fully qualify replicasets.apps because prior to Kubernetes 1.16, replicasets also // existed in the extensions API group, but we back up replicasets from "apps" so we want @@ -153,6 +152,7 @@ type Config struct { PodVolumeOperationTimeout time.Duration ResourceTerminatingTimeout time.Duration DefaultBackupTTL time.Duration + DefaultVGSLabelKey string StoreValidationFrequency time.Duration DefaultCSISnapshotTimeout time.Duration DefaultItemOperationTimeout time.Duration @@ -179,9 +179,8 @@ type Config struct { CredentialsDirectory string BackupRepoConfig string RepoMaintenanceJobConfig string - PodResources kube.PodResources - KeepLatestMaintenanceJobs int ItemBlockWorkerCount int + ConcurrentBackups int } func GetDefaultConfig() *Config { @@ -192,6 +191,7 @@ func GetDefaultConfig() *Config { DefaultVolumeSnapshotLocations: flag.NewMap().WithKeyValueDelimiter(':'), BackupSyncPeriod: defaultBackupSyncPeriod, DefaultBackupTTL: defaultBackupTTL, + DefaultVGSLabelKey: velerov1api.DefaultVGSLabelKey, DefaultCSISnapshotTimeout: defaultCSISnapshotTimeout, DefaultItemOperationTimeout: defaultItemOperationTimeout, ResourceTimeout: resourceTimeout, @@ -206,20 +206,14 @@ func GetDefaultConfig() *Config { LogLevel: logging.LogLevelFlag(logrus.InfoLevel), LogFormat: logging.NewFormatFlag(), DefaultVolumesToFsBackup: podvolumeconfigs.DefaultVolumesToFsBackup, - UploaderType: uploader.ResticType, + UploaderType: uploader.KopiaType, MaxConcurrentK8SConnections: defaultMaxConcurrentK8SConnections, DefaultSnapshotMoveData: false, DisableInformerCache: defaultDisableInformerCache, ScheduleSkipImmediately: false, - CredentialsDirectory: defaultCredentialsDirectory, - PodResources: kube.PodResources{ - CPURequest: DefaultMaintenanceJobCPULimit, - CPULimit: 
DefaultMaintenanceJobCPURequest, - MemoryRequest: DefaultMaintenanceJobMemRequest, - MemoryLimit: DefaultMaintenanceJobMemLimit, - }, - KeepLatestMaintenanceJobs: DefaultKeepLatestMaintenanceJobs, - ItemBlockWorkerCount: DefaultItemBlockWorkerCount, + CredentialsDirectory: credentials.DefaultStoreDirectory(), + ItemBlockWorkerCount: DefaultItemBlockWorkerCount, + ConcurrentBackups: DefaultConcurrentBackups, } return config @@ -243,6 +237,7 @@ func (c *Config) BindFlags(flags *pflag.FlagSet) { flags.StringVar(&c.ProfilerAddress, "profiler-address", c.ProfilerAddress, "The address to expose the pprof profiler.") flags.DurationVar(&c.ResourceTerminatingTimeout, "terminating-resource-timeout", c.ResourceTerminatingTimeout, "How long to wait on persistent volumes and namespaces to terminate during a restore before timing out.") flags.DurationVar(&c.DefaultBackupTTL, "default-backup-ttl", c.DefaultBackupTTL, "How long to wait by default before backups can be garbage collected.") + flags.StringVar(&c.DefaultVGSLabelKey, "volume-group-snapshot-label-key", c.DefaultVGSLabelKey, "Label key for grouping PVCs into VolumeGroupSnapshot. Default value is 'velero.io/volume-group'") flags.DurationVar(&c.RepoMaintenanceFrequency, "default-repo-maintain-frequency", c.RepoMaintenanceFrequency, "How often 'maintain' is run for backup repositories by default.") flags.DurationVar(&c.GarbageCollectionFrequency, "garbage-collection-frequency", c.GarbageCollectionFrequency, "How often garbage collection is run for expired backups.") flags.DurationVar(&c.ItemOperationSyncFrequency, "item-operation-sync-frequency", c.ItemOperationSyncFrequency, "How often to check status on backup/restore operations after backup/restore processing. Default is 10 seconds") @@ -256,36 +251,6 @@ func (c *Config) BindFlags(flags *pflag.FlagSet) { flags.BoolVar(&c.ScheduleSkipImmediately, "schedule-skip-immediately", c.ScheduleSkipImmediately, "Skip the first scheduled backup immediately after creating a schedule. Default is false (don't skip).") flags.Var(&c.DefaultVolumeSnapshotLocations, "default-volume-snapshot-locations", "List of unique volume providers and default volume snapshot location (provider1:location-01,provider2:location-02,...)") - flags.IntVar( - &c.KeepLatestMaintenanceJobs, - "keep-latest-maintenance-jobs", - c.KeepLatestMaintenanceJobs, - "Number of latest maintenance jobs to keep each repository. Optional.", - ) - flags.StringVar( - &c.PodResources.CPURequest, - "maintenance-job-cpu-request", - c.PodResources.CPURequest, - "CPU request for maintenance job. Default is no limit.", - ) - flags.StringVar( - &c.PodResources.MemoryRequest, - "maintenance-job-mem-request", - c.PodResources.MemoryRequest, - "Memory request for maintenance job. Default is no limit.", - ) - flags.StringVar( - &c.PodResources.CPULimit, - "maintenance-job-cpu-limit", - c.PodResources.CPULimit, - "CPU limit for maintenance job. Default is no limit.", - ) - flags.StringVar( - &c.PodResources.MemoryLimit, - "maintenance-job-mem-limit", - c.PodResources.MemoryLimit, - "Memory limit for maintenance job. Default is no limit.", - ) flags.StringVar( &c.BackupRepoConfig, "backup-repository-configmap", @@ -304,4 +269,10 @@ func (c *Config) BindFlags(flags *pflag.FlagSet) { c.ItemBlockWorkerCount, "Number of worker threads to process ItemBlocks. Default is one. Optional.", ) + flags.IntVar( + &c.ConcurrentBackups, + "concurrent-backups", + c.ConcurrentBackups, + "Number of backups to process concurrently. Default is one. 
Optional.", + ) } diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/constant/constant.go b/vendor/github.com/vmware-tanzu/velero/pkg/constant/constant.go index cfadcc5dc..f6ff09ae5 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/constant/constant.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/constant/constant.go @@ -1,6 +1,7 @@ package constant const ( + ControllerBackupQueue = "backup-queue" ControllerBackup = "backup" ControllerBackupOperations = "backup-operations" ControllerBackupDeletion = "backup-deletion" @@ -22,4 +23,7 @@ const ( PluginCSIPVCRestoreRIA = "velero.io/csi-pvc-restorer" PluginCsiVolumeSnapshotRestoreRIA = "velero.io/csi-volumesnapshot-restorer" + + DefaultEphemeralStorageRequest = "0" + DefaultEphemeralStorageLimit = "0" ) diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/kuberesource/kuberesource.go b/vendor/github.com/vmware-tanzu/velero/pkg/kuberesource/kuberesource.go index c39e769db..0f1dbf8ec 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/kuberesource/kuberesource.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/kuberesource/kuberesource.go @@ -33,7 +33,9 @@ var ( Secrets = schema.GroupResource{Group: "", Resource: "secrets"} VolumeSnapshotClasses = schema.GroupResource{Group: "snapshot.storage.k8s.io", Resource: "volumesnapshotclasses"} VolumeSnapshots = schema.GroupResource{Group: "snapshot.storage.k8s.io", Resource: "volumesnapshots"} + VolumeGroupSnapshots = schema.GroupResource{Group: "snapshot.storage.k8s.io", Resource: "volumegroupsnapshots"} VolumeSnapshotContents = schema.GroupResource{Group: "snapshot.storage.k8s.io", Resource: "volumesnapshotcontents"} PriorityClasses = schema.GroupResource{Group: "scheduling.k8s.io", Resource: "priorityclasses"} DataUploads = schema.GroupResource{Group: "velero.io", Resource: "datauploads"} + VGSKind = "VolumeGroupSnapshot" ) diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/label/label.go b/vendor/github.com/vmware-tanzu/velero/pkg/label/label.go index 98d232f24..bfd28b836 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/label/label.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/label/label.go @@ -18,6 +18,7 @@ package label import ( "crypto/sha256" + "crypto/sha3" "encoding/hex" "fmt" @@ -49,6 +50,17 @@ func GetValidName(label string) string { return label[:charsFromLabel] + strSha[:6] } +// ReturnNameOrHash returns the original name if it is within the DNS1035LabelMaxLength limit, +// otherwise it returns the sha3 Sum224 hash(length is 56) of the name. +func ReturnNameOrHash(name string) string { + if len(name) <= validation.DNS1035LabelMaxLength { + return name + } + + hash := sha3.Sum224([]byte(name)) + return hex.EncodeToString(hash[:]) +} + // NewSelectorForBackup returns a Selector based on the backup name. // This is useful for interacting with Listers that need a Selector. func NewSelectorForBackup(name string) labels.Selector { diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/backup_item_action.go b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/backup_item_action.go index 00f3cc0ea..9c1a5665d 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/backup_item_action.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/backup_item_action.go @@ -17,8 +17,9 @@ limitations under the License. 
package framework import ( + "context" + plugin "github.com/hashicorp/go-plugin" - "golang.org/x/net/context" "google.golang.org/grpc" "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/backup_item_action_client.go b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/backup_item_action_client.go index 948cb3492..724737d01 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/backup_item_action_client.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/backup_item_action_client.go @@ -19,8 +19,9 @@ package framework import ( "encoding/json" + "context" + "github.com/pkg/errors" - "golang.org/x/net/context" "google.golang.org/grpc" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/backup_item_action_server.go b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/backup_item_action_server.go index 6511591a9..7c18b4ef6 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/backup_item_action_server.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/backup_item_action_server.go @@ -19,8 +19,9 @@ package framework import ( "encoding/json" + "context" + "github.com/pkg/errors" - "golang.org/x/net/context" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/backupitemaction/v2/backup_item_action.go b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/backupitemaction/v2/backup_item_action.go index c1e2e5401..903ec378c 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/backupitemaction/v2/backup_item_action.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/backupitemaction/v2/backup_item_action.go @@ -17,8 +17,9 @@ limitations under the License. 
package v2 import ( + "context" + plugin "github.com/hashicorp/go-plugin" - "golang.org/x/net/context" "google.golang.org/grpc" "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/backupitemaction/v2/backup_item_action_client.go b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/backupitemaction/v2/backup_item_action_client.go index 52733de2d..64695dbe3 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/backupitemaction/v2/backup_item_action_client.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/backupitemaction/v2/backup_item_action_client.go @@ -19,8 +19,9 @@ package v2 import ( "encoding/json" + "context" + "github.com/pkg/errors" - "golang.org/x/net/context" "google.golang.org/grpc" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/backupitemaction/v2/backup_item_action_server.go b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/backupitemaction/v2/backup_item_action_server.go index c622490e7..f8c894eba 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/backupitemaction/v2/backup_item_action_server.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/backupitemaction/v2/backup_item_action_server.go @@ -19,8 +19,9 @@ package v2 import ( "encoding/json" + "context" + "github.com/pkg/errors" - "golang.org/x/net/context" "google.golang.org/protobuf/types/known/emptypb" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/common/plugin_config.go b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/common/plugin_config.go index 58f9bafc6..82b914352 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/common/plugin_config.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/common/plugin_config.go @@ -21,7 +21,7 @@ import ( "fmt" "github.com/pkg/errors" - corev1 "k8s.io/api/core/v1" + corev1api "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" ) @@ -30,7 +30,7 @@ func PluginConfigLabelSelector(kind PluginKind, name string) string { return fmt.Sprintf("velero.io/plugin-config,%s=%s", name, kind) } -func GetPluginConfig(kind PluginKind, name string, client corev1client.ConfigMapInterface) (*corev1.ConfigMap, error) { +func GetPluginConfig(kind PluginKind, name string, client corev1client.ConfigMapInterface) (*corev1api.ConfigMap, error) { opts := metav1.ListOptions{ // velero.io/plugin-config: true // velero.io/pod-volume-restore: RestoreItemAction diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/delete_item_action.go b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/delete_item_action.go index afc5d548e..db4329b7f 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/delete_item_action.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/delete_item_action.go @@ -17,8 +17,9 @@ limitations under the License. 
package framework import ( + "context" + plugin "github.com/hashicorp/go-plugin" - "golang.org/x/net/context" "google.golang.org/grpc" "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/delete_item_action_client.go b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/delete_item_action_client.go index 088c42b51..bec5088db 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/delete_item_action_client.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/delete_item_action_client.go @@ -19,8 +19,9 @@ package framework import ( "encoding/json" + "context" + "github.com/pkg/errors" - "golang.org/x/net/context" "google.golang.org/grpc" "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/delete_item_action_server.go b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/delete_item_action_server.go index e298969d1..01abe8dc3 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/delete_item_action_server.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/delete_item_action_server.go @@ -19,8 +19,9 @@ package framework import ( "encoding/json" + "context" + "github.com/pkg/errors" - "golang.org/x/net/context" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/itemblockaction/v1/item_block_action.go b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/itemblockaction/v1/item_block_action.go index 9fb8094ee..2115d2ecf 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/itemblockaction/v1/item_block_action.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/itemblockaction/v1/item_block_action.go @@ -17,8 +17,9 @@ limitations under the License. 
package v1 import ( + "context" + plugin "github.com/hashicorp/go-plugin" - "golang.org/x/net/context" "google.golang.org/grpc" "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/itemblockaction/v1/item_block_action_client.go b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/itemblockaction/v1/item_block_action_client.go index 612a14a6c..aa597c4af 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/itemblockaction/v1/item_block_action_client.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/itemblockaction/v1/item_block_action_client.go @@ -19,8 +19,9 @@ package v1 import ( "encoding/json" + "context" + "github.com/pkg/errors" - "golang.org/x/net/context" "google.golang.org/grpc" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/itemblockaction/v1/item_block_action_server.go b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/itemblockaction/v1/item_block_action_server.go index ab9ad7485..2d940550c 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/itemblockaction/v1/item_block_action_server.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/itemblockaction/v1/item_block_action_server.go @@ -19,8 +19,9 @@ package v1 import ( "encoding/json" + "context" + "github.com/pkg/errors" - "golang.org/x/net/context" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/object_store.go b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/object_store.go index d7c7b95e4..ea624da42 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/object_store.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/object_store.go @@ -17,8 +17,9 @@ limitations under the License. 
package framework import ( + "context" + plugin "github.com/hashicorp/go-plugin" - "golang.org/x/net/context" "google.golang.org/grpc" "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/object_store_client.go b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/object_store_client.go index 725f8e934..b59f3d1b0 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/object_store_client.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/object_store_client.go @@ -20,8 +20,9 @@ import ( "io" "time" + "context" + "github.com/pkg/errors" - "golang.org/x/net/context" "google.golang.org/grpc" "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/object_store_server.go b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/object_store_server.go index 2d3ef3658..fbed21ecf 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/object_store_server.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/object_store_server.go @@ -20,8 +20,9 @@ import ( "io" "time" + "context" + "github.com/pkg/errors" - "golang.org/x/net/context" "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" proto "github.com/vmware-tanzu/velero/pkg/plugin/generated" diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/plugin_lister.go b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/plugin_lister.go index 19e349007..6db81c66d 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/plugin_lister.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/plugin_lister.go @@ -17,9 +17,10 @@ limitations under the License. package framework import ( + "context" + plugin "github.com/hashicorp/go-plugin" "github.com/pkg/errors" - "golang.org/x/net/context" "google.golang.org/grpc" "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/restore_item_action.go b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/restore_item_action.go index 0aaf2b47e..26cef3728 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/restore_item_action.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/restore_item_action.go @@ -17,8 +17,9 @@ limitations under the License. 
package framework import ( + "context" + plugin "github.com/hashicorp/go-plugin" - "golang.org/x/net/context" "google.golang.org/grpc" "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/restore_item_action_client.go b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/restore_item_action_client.go index 8809e31c3..3a5a633f3 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/restore_item_action_client.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/restore_item_action_client.go @@ -19,8 +19,9 @@ package framework import ( "encoding/json" + "context" + "github.com/pkg/errors" - "golang.org/x/net/context" "google.golang.org/grpc" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/restore_item_action_server.go b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/restore_item_action_server.go index bdc149c48..175a941bd 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/restore_item_action_server.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/restore_item_action_server.go @@ -19,8 +19,9 @@ package framework import ( "encoding/json" + "context" + "github.com/pkg/errors" - "golang.org/x/net/context" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/restoreitemaction/v2/restore_item_action.go b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/restoreitemaction/v2/restore_item_action.go index 243e2fd67..00bb825b8 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/restoreitemaction/v2/restore_item_action.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/restoreitemaction/v2/restore_item_action.go @@ -17,8 +17,9 @@ limitations under the License. 
package v2 import ( + "context" + plugin "github.com/hashicorp/go-plugin" - "golang.org/x/net/context" "google.golang.org/grpc" "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/restoreitemaction/v2/restore_item_action_client.go b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/restoreitemaction/v2/restore_item_action_client.go index b1d61fb75..5e2f01c37 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/restoreitemaction/v2/restore_item_action_client.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/restoreitemaction/v2/restore_item_action_client.go @@ -19,8 +19,9 @@ package v2 import ( "encoding/json" + "context" + "github.com/pkg/errors" - "golang.org/x/net/context" "google.golang.org/grpc" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/restoreitemaction/v2/restore_item_action_server.go b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/restoreitemaction/v2/restore_item_action_server.go index 2795d787d..115961656 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/restoreitemaction/v2/restore_item_action_server.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/restoreitemaction/v2/restore_item_action_server.go @@ -19,8 +19,9 @@ package v2 import ( "encoding/json" + "context" + "github.com/pkg/errors" - "golang.org/x/net/context" "google.golang.org/protobuf/types/known/durationpb" "google.golang.org/protobuf/types/known/emptypb" "google.golang.org/protobuf/types/known/timestamppb" diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/volume_snapshotter.go b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/volume_snapshotter.go index 5eb6159eb..8cbb48fa4 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/volume_snapshotter.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/volume_snapshotter.go @@ -17,8 +17,9 @@ limitations under the License. 
package framework import ( + "context" + plugin "github.com/hashicorp/go-plugin" - "golang.org/x/net/context" "google.golang.org/grpc" "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/volume_snapshotter_client.go b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/volume_snapshotter_client.go index da66f2cea..f7af07ce4 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/volume_snapshotter_client.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/volume_snapshotter_client.go @@ -19,8 +19,9 @@ package framework import ( "encoding/json" + "context" + "github.com/pkg/errors" - "golang.org/x/net/context" "google.golang.org/grpc" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/volume_snapshotter_server.go b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/volume_snapshotter_server.go index 99bdea03a..de30c823f 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/volume_snapshotter_server.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/plugin/framework/volume_snapshotter_server.go @@ -19,8 +19,9 @@ package framework import ( "encoding/json" + "context" + "github.com/pkg/errors" - "golang.org/x/net/context" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/types/node_agent.go b/vendor/github.com/vmware-tanzu/velero/pkg/types/node_agent.go new file mode 100644 index 000000000..f456bbf55 --- /dev/null +++ b/vendor/github.com/vmware-tanzu/velero/pkg/types/node_agent.go @@ -0,0 +1,107 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package types
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/vmware-tanzu/velero/pkg/util/kube"
+)
+
+type LoadConcurrency struct {
+	// GlobalConfig specifies the concurrency number to all nodes for which per-node config is not specified
+	GlobalConfig int `json:"globalConfig,omitempty"`
+
+	// PerNodeConfig specifies the concurrency number to nodes matched by rules
+	PerNodeConfig []RuledConfigs `json:"perNodeConfig,omitempty"`
+
+	// PrepareQueueLength specifies the max number of loads that are under expose
+	PrepareQueueLength int `json:"prepareQueueLength,omitempty"`
+}
+
+type LoadAffinity struct {
+	// NodeSelector specifies the label selector to match nodes
+	NodeSelector metav1.LabelSelector `json:"nodeSelector"`
+}
+
+type RuledConfigs struct {
+	// NodeSelector specifies the label selector to match nodes
+	NodeSelector metav1.LabelSelector `json:"nodeSelector"`
+
+	// Number specifies the number value associated to the matched nodes
+	Number int `json:"number"`
+}
+
+type BackupPVC struct {
+	// StorageClass is the name of storage class to be used by the backupPVC
+	StorageClass string `json:"storageClass,omitempty"`
+
+	// ReadOnly sets the backupPVC's access mode as read only
+	ReadOnly bool `json:"readOnly,omitempty"`
+
+	// SPCNoRelabeling sets Spec.SecurityContext.SELinux.Type to "spc_t" for the pod mounting the backupPVC
+	// ignored if ReadOnly is false
+	SPCNoRelabeling bool `json:"spcNoRelabeling,omitempty"`
+
+	// Annotations permits setting annotations for the backupPVC
+	Annotations map[string]string `json:"annotations,omitempty"`
+}
+
+type RestorePVC struct {
+	// IgnoreDelayBinding indicates to ignore delay binding the restorePVC when it is in WaitForFirstConsumer mode
+	IgnoreDelayBinding bool `json:"ignoreDelayBinding,omitempty"`
+}
+
+type CachePVC struct {
+	// StorageClass specifies the storage class for cache PVC
+	StorageClass string `json:"storageClass,omitempty"`
+
+	// ResidentThresholdInMB specifies the minimum size of the backup data to create cache PVC
+	ResidentThresholdInMB int64 `json:"residentThresholdInMB,omitempty"`
+}
+
+type NodeAgentConfigs struct {
+	// LoadConcurrency is the config for data path load concurrency per node.
+	LoadConcurrency *LoadConcurrency `json:"loadConcurrency,omitempty"`
+
+	// LoadAffinity is the config for data path load affinity.
+	LoadAffinity []*kube.LoadAffinity `json:"loadAffinity,omitempty"`
+
+	// BackupPVCConfig is the config for backupPVC (intermediate PVC) of snapshot data movement
+	BackupPVCConfig map[string]BackupPVC `json:"backupPVC,omitempty"`
+
+	// RestorePVCConfig is the config for restorePVC (intermediate PVC) of generic restore
+	RestorePVCConfig *RestorePVC `json:"restorePVC,omitempty"`
+
+	// PodResources is the resource config for various types of pods launched by node-agent, i.e., data mover pods.
+	PodResources *kube.PodResources `json:"podResources,omitempty"`
+
+	// PriorityClassName is the priority class name for data mover pods created by the node agent
+	PriorityClassName string `json:"priorityClassName,omitempty"`
+
+	// PrivilegedFsBackup determines whether to create fs-backup pods as privileged pods
+	PrivilegedFsBackup bool `json:"privilegedFsBackup,omitempty"`
+
+	// CachePVCConfig is the config for cachePVC
+	CachePVCConfig *CachePVC `json:"cachePVC,omitempty"`
+
+	// PodAnnotations are annotations to be added to pods created by node-agent, i.e., data mover pods.
+	PodAnnotations map[string]string `json:"podAnnotations,omitempty"`
+
+	// PodLabels are labels to be added to pods created by node-agent, i.e., data mover pods.
+	PodLabels map[string]string `json:"podLabels,omitempty"`
+}
diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/types/repo_maintenance.go b/vendor/github.com/vmware-tanzu/velero/pkg/types/repo_maintenance.go
new file mode 100644
index 000000000..a5115ff92
--- /dev/null
+++ b/vendor/github.com/vmware-tanzu/velero/pkg/types/repo_maintenance.go
@@ -0,0 +1,42 @@
+/*
+Copyright The Velero Contributors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package types
+
+import "github.com/vmware-tanzu/velero/pkg/util/kube"
+
+type JobConfigs struct {
+	// LoadAffinities is the config for repository maintenance job load affinity.
+	LoadAffinities []*kube.LoadAffinity `json:"loadAffinity,omitempty"`
+
+	// PodResources is the config for the CPU and memory resources setting.
+	PodResources *kube.PodResources `json:"podResources,omitempty"`
+
+	// KeepLatestMaintenanceJobs is the number of latest maintenance jobs to keep for the repository.
+	KeepLatestMaintenanceJobs *int `json:"keepLatestMaintenanceJobs,omitempty"`
+
+	// PriorityClassName is the priority class name for the maintenance job pod
+	// Note: This is only read from the global configuration, not per-repository
+	PriorityClassName string `json:"priorityClassName,omitempty"`
+
+	// PodAnnotations are annotations to be added to maintenance job pods.
+	// Note: This is only read from the global configuration, not per-repository
+	PodAnnotations map[string]string `json:"podAnnotations,omitempty"`
+
+	// PodLabels are labels to be added to maintenance job pods.
+	// Note: This is only read from the global configuration, not per-repository
+	PodLabels map[string]string `json:"podLabels,omitempty"`
+}
diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/uploader/types.go b/vendor/github.com/vmware-tanzu/velero/pkg/uploader/types.go
index fb79f7c9f..f69cbf072 100644
--- a/vendor/github.com/vmware-tanzu/velero/pkg/uploader/types.go
+++ b/vendor/github.com/vmware-tanzu/velero/pkg/uploader/types.go
@@ -41,12 +41,8 @@ const (
 // It will return an error if it's invalid.
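// Illustrative sketch (not part of the vendored change): with this update only
// kopia validates; restic is rejected outright instead of merely producing a
// deprecation message.
//
//	if _, err := uploader.ValidateUploaderType("restic"); err != nil {
//		fmt.Println(err) // invalid uploader type 'restic', valid type: 'kopia'
//	}
//	if _, err := uploader.ValidateUploaderType(" kopia "); err == nil {
//		fmt.Println("accepted") // input is trimmed before validation
//	}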
func ValidateUploaderType(t string) (string, error) { t = strings.TrimSpace(t) - if t != ResticType && t != KopiaType { - return "", fmt.Errorf("invalid uploader type '%s', valid upload types are: '%s', '%s'", t, ResticType, KopiaType) - } - - if t == ResticType { - return fmt.Sprintf("Uploader '%s' is deprecated, don't use it for new backups, otherwise the backups won't be available for restore when this functionality is removed in a future version of Velero", t), nil + if t != KopiaType { + return "", fmt.Errorf("invalid uploader type '%s', valid type: '%s'", t, KopiaType) } return "", nil diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/util/collections/includes_excludes.go b/vendor/github.com/vmware-tanzu/velero/pkg/util/collections/includes_excludes.go index 1cff16f3e..ab63eaa72 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/util/collections/includes_excludes.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/util/collections/includes_excludes.go @@ -19,6 +19,8 @@ package collections import ( "strings" + "github.com/vmware-tanzu/velero/internal/resourcepolicies" + "github.com/gobwas/glob" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -30,6 +32,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/discovery" "github.com/vmware-tanzu/velero/pkg/kuberesource" "github.com/vmware-tanzu/velero/pkg/util/boolptr" + "github.com/vmware-tanzu/velero/pkg/util/wildcard" ) type globStringSet struct { @@ -53,6 +56,143 @@ func (gss globStringSet) match(match string) bool { return false } +// NamespaceIncludesExcludes adds some features to IncludesExcludes +// to handle namespace-specific functionality. In particular, it +// provides a way to list all namespaces included in order to determine +// overlap between backups, and it will be expanded in the future to +// handle namespace wildcard values +type NamespaceIncludesExcludes struct { + activeNamespaces []string + includesExcludes *IncludesExcludes + wildcardExpanded bool + wildcardResult []string +} + +func NewNamespaceIncludesExcludes() *NamespaceIncludesExcludes { + return &NamespaceIncludesExcludes{ + activeNamespaces: []string{}, + includesExcludes: NewIncludesExcludes(), + } +} + +func (nie *NamespaceIncludesExcludes) ActiveNamespaces(activeNamespaces []string) *NamespaceIncludesExcludes { + nie.activeNamespaces = activeNamespaces + return nie +} + +func (nie *NamespaceIncludesExcludes) IsWildcardExpanded() bool { + return nie.wildcardExpanded +} + +// Includes adds items to the includes list. '*' is a wildcard +// value meaning "include everything". +func (nie *NamespaceIncludesExcludes) Includes(includes ...string) *NamespaceIncludesExcludes { + nie.includesExcludes.Includes(includes...) + return nie +} + +// GetIncludes returns the items in the includes list +func (nie *NamespaceIncludesExcludes) GetIncludes() []string { + return nie.includesExcludes.GetIncludes() +} + +func (nie *NamespaceIncludesExcludes) GetExcludes() []string { + return nie.includesExcludes.GetExcludes() +} + +// SetIncludes sets the includes list to the given list +func (nie *NamespaceIncludesExcludes) SetIncludes(includes []string) *NamespaceIncludesExcludes { + nie.includesExcludes.includes = newGlobStringSet() + nie.includesExcludes.includes.Insert(includes...) + return nie +} + +// SetExcludes sets the excludes list to the given list +func (nie *NamespaceIncludesExcludes) SetExcludes(excludes []string) *NamespaceIncludesExcludes { + nie.includesExcludes.excludes = newGlobStringSet() + nie.includesExcludes.excludes.Insert(excludes...) 
+	return nie
+}
+
+// IncludesString returns a string containing all of the includes, separated by commas, or * if the
+// list is empty.
+func (nie *NamespaceIncludesExcludes) IncludesString() string {
+	return nie.includesExcludes.IncludesString()
+}
+
+// Excludes adds items to the excludes list. '*' is a wildcard
+// value meaning "exclude everything".
+func (nie *NamespaceIncludesExcludes) Excludes(excludes ...string) *NamespaceIncludesExcludes {
+	nie.includesExcludes.Excludes(excludes...)
+	return nie
+}
+
+// ExcludesString returns a string containing all of the excludes, separated by commas, or the
+// empty string if the list is empty.
+func (nie *NamespaceIncludesExcludes) ExcludesString() string {
+	return nie.includesExcludes.ExcludesString()
+}
+
+// ShouldInclude returns whether the specified item should be
+// included or not. Everything in the includes list except those
+// items in the excludes list should be included.
+func (nie *NamespaceIncludesExcludes) ShouldInclude(s string) bool {
+	// Special case: if wildcard expansion occurred and resulted in an empty includes list,
+	// it means the wildcard pattern matched nothing, so we should include nothing.
+	// This differs from the default behavior where an empty includes list means "include everything".
+	if nie.wildcardExpanded && nie.includesExcludes.includes.Len() == 0 {
+		return false
+	}
+	return nie.includesExcludes.ShouldInclude(s)
+}
+
+// IncludeEverything returns true if the includes list is empty or '*'
+// and the excludes list is empty, or false otherwise.
+func (nie *NamespaceIncludesExcludes) IncludeEverything() bool {
+	return nie.includesExcludes.IncludeEverything()
+}
+
+// ExpandIncludesExcludes attempts to expand wildcard patterns, if any, in the includes and excludes lists.
+func (nie *NamespaceIncludesExcludes) ExpandIncludesExcludes() error {
+	includes := nie.GetIncludes()
+	excludes := nie.GetExcludes()
+
+	if wildcard.ShouldExpandWildcards(includes, excludes) {
+		expandedIncludes, expandedExcludes, err := wildcard.ExpandWildcards(
+			nie.activeNamespaces, includes, excludes)
+		if err != nil {
+			return err
+		}
+
+		nie.SetIncludes(expandedIncludes)
+		nie.SetExcludes(expandedExcludes)
+		nie.wildcardExpanded = true
+	}
+
+	return nil
+}
+
+// ResolveNamespaceList returns a list of all namespaces which will be backed up,
+// expanding wildcard patterns first if that has not already happened.
+func (nie *NamespaceIncludesExcludes) ResolveNamespaceList() ([]string, error) {
+	// Check if this is being called by non-backup processing e.g. backup queue controller
+	if !nie.wildcardExpanded {
+		err := nie.ExpandIncludesExcludes()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	outNamespaces := []string{}
+	for _, ns := range nie.activeNamespaces {
+		if nie.ShouldInclude(ns) {
+			outNamespaces = append(outNamespaces, ns)
+		}
+	}
+	nie.wildcardResult = outNamespaces
+	return nie.wildcardResult, nil
+}
+
 // IncludesExcludes is a type that manages lists of included
 // and excluded items. The logic implemented is that everything
 // in the included list except those items in the excluded list
@@ -105,14 +245,63 @@ func (ie *IncludesExcludes) ShouldInclude(s string) bool {
 	return ie.includes.Len() == 0 || ie.includes.Has("*") || ie.includes.match(s)
 }
 
+// IncludesString returns a string containing all of the includes, separated by commas, or * if the
+// list is empty.
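// Illustrative sketch (the namespace names are assumptions, not from the change):
// resolving a wildcard include against the namespaces active in the cluster.
//
//	nie := collections.NewNamespaceIncludesExcludes().
//		ActiveNamespaces([]string{"app-1", "app-2", "velero"}).
//		Includes("app-*")
//	nsList, err := nie.ResolveNamespaceList() // expands the wildcard, then filters
//	// nsList == [app-1 app-2]; "velero" does not match the pattern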
+func (ie *IncludesExcludes) IncludesString() string {
+	return asString(ie.GetIncludes(), "*")
+}
+
+// ExcludesString returns a string containing all of the excludes, separated by commas, or the
+// empty string if the list is empty.
+func (ie *IncludesExcludes) ExcludesString() string {
+	return asString(ie.GetExcludes(), "")
+}
+
+// IncludeEverything returns true if the includes list is empty or '*'
+// and the excludes list is empty, or false otherwise.
+func (ie *IncludesExcludes) IncludeEverything() bool {
+	return ie.excludes.Len() == 0 && (ie.includes.Len() == 0 || (ie.includes.Len() == 1 && ie.includes.Has("*")))
+}
+
+// GetResourceIncludesExcludes takes the lists of resources to include and exclude, uses the
+// discovery helper to resolve them to fully-qualified group-resource names, and returns an
+// IncludesExcludes list.
+func GetResourceIncludesExcludes(helper discovery.Helper, includes, excludes []string) *IncludesExcludes {
+	resources := generateIncludesExcludes(
+		includes,
+		excludes,
+		func(item string) string {
+			gvr, _, err := helper.ResourceFor(schema.ParseGroupResource(item).WithVersion(""))
+			if err != nil {
+				// If we can't resolve it, return it as-is. This prevents the generated
+				// includes-excludes list from including *everything*, if none of the includes
+				// can be resolved. ref. https://github.com/vmware-tanzu/velero/issues/2461
+				return item
+			}
+
+			gr := gvr.GroupResource()
+			return gr.String()
+		},
+	)
+
+	return resources
+}
+
+func asString(in []string, empty string) string {
+	if len(in) == 0 {
+		return empty
+	}
+	return strings.Join(in, ", ")
+}
+
 // IncludesExcludesInterface is used as polymorphic IncludesExcludes for Global and scope
 // resources Include/Exclude.
 type IncludesExcludesInterface interface {
-	// Check whether the type name passed in by parameter should be included.
+	// ShouldInclude checks whether the type name passed in by parameter should be included.
 	// typeName should be k8s.io/apimachinery/pkg/runtime/schema GroupResource's String() result.
 	ShouldInclude(typeName string) bool
 
-	// Check whether the type name passed in by parameter should be excluded.
+	// ShouldExclude checks whether the type name passed in by parameter should be excluded.
 	// typeName should be k8s.io/apimachinery/pkg/runtime/schema GroupResource's String() result.
ShouldExclude(typeName string) bool } @@ -120,7 +309,7 @@ type IncludesExcludesInterface interface { type GlobalIncludesExcludes struct { resourceFilter IncludesExcludes includeClusterResources *bool - namespaceFilter IncludesExcludes + namespaceFilter NamespaceIncludesExcludes helper discovery.Helper logger logrus.FieldLogger @@ -188,10 +377,24 @@ func (ie *GlobalIncludesExcludes) ShouldExclude(typeName string) bool { return false } +func GetGlobalResourceIncludesExcludes(helper discovery.Helper, logger logrus.FieldLogger, includes, excludes []string, includeClusterResources *bool, nsIncludesExcludes NamespaceIncludesExcludes) *GlobalIncludesExcludes { + ret := &GlobalIncludesExcludes{ + resourceFilter: *GetResourceIncludesExcludes(helper, includes, excludes), + includeClusterResources: includeClusterResources, + namespaceFilter: nsIncludesExcludes, + helper: helper, + logger: logger, + } + + logger.Infof("Including resources: %s", ret.resourceFilter.IncludesString()) + logger.Infof("Excluding resources: %s", ret.resourceFilter.ExcludesString()) + return ret +} + type ScopeIncludesExcludes struct { - namespaceScopedResourceFilter IncludesExcludes // namespace-scoped resource filter - clusterScopedResourceFilter IncludesExcludes // cluster-scoped resource filter - namespaceFilter IncludesExcludes // namespace filter + namespaceScopedResourceFilter IncludesExcludes // namespace-scoped resource filter + clusterScopedResourceFilter IncludesExcludes // cluster-scoped resource filter + namespaceFilter NamespaceIncludesExcludes // namespace filter helper discovery.Helper logger logrus.FieldLogger @@ -259,32 +462,67 @@ func (ie *ScopeIncludesExcludes) ShouldExclude(typeName string) bool { return false } -// IncludesString returns a string containing all of the includes, separated by commas, or * if the -// list is empty. -func (ie *IncludesExcludes) IncludesString() string { - return asString(ie.GetIncludes(), "*") -} - -// ExcludesString returns a string containing all of the excludes, separated by commas, or if the -// list is empty. -func (ie *IncludesExcludes) ExcludesString() string { - return asString(ie.GetExcludes(), "") -} - -func asString(in []string, empty string) string { - if len(in) == 0 { - return empty +func (ie *ScopeIncludesExcludes) CombineWithPolicy(policy *resourcepolicies.IncludeExcludePolicy) { + if policy == nil { + return } - return strings.Join(in, ", ") -} - -// IncludeEverything returns true if the includes list is empty or '*' -// and the excludes list is empty, or false otherwise. -func (ie *IncludesExcludes) IncludeEverything() bool { - return ie.excludes.Len() == 0 && (ie.includes.Len() == 0 || (ie.includes.Len() == 1 && ie.includes.Has("*"))) + mapFunc := scopeResourceMapFunc(ie.helper) + for _, item := range policy.ExcludedNamespaceScopedResources { + resolvedItem := mapFunc(item, true) + if resolvedItem == "" { + continue + } + // The existing includeExcludes in the struct has higher priority, therefore, we should only add the item to the filter + // when the struct does not include this item and this item is not yet in the excludes filter. 
+		if !ie.namespaceScopedResourceFilter.includes.match(resolvedItem) &&
+			!ie.namespaceScopedResourceFilter.excludes.match(resolvedItem) {
+			ie.namespaceScopedResourceFilter.Excludes(resolvedItem)
+		}
+	}
+	for _, item := range policy.IncludedNamespaceScopedResources {
+		resolvedItem := mapFunc(item, true)
+		if resolvedItem == "" {
+			continue
+		}
+		// The existing includeExcludes in the struct has higher priority, therefore, we should only add the item to the filter
+		// when the struct does not exclude this item and this item is not yet in the includes filter.
+		if !ie.namespaceScopedResourceFilter.includes.match(resolvedItem) &&
+			!ie.namespaceScopedResourceFilter.excludes.match(resolvedItem) {
+			ie.namespaceScopedResourceFilter.Includes(resolvedItem)
+		}
+	}
+	for _, item := range policy.ExcludedClusterScopedResources {
+		resolvedItem := mapFunc(item, false)
+		if resolvedItem == "" {
+			continue
+		}
+		// The existing includeExcludes in the struct has higher priority, therefore, we should only add the item to the filter
+		// when the struct does not include this item and this item is not yet in the excludes filter.
+		if !ie.clusterScopedResourceFilter.includes.match(resolvedItem) &&
+			!ie.clusterScopedResourceFilter.excludes.match(resolvedItem) {
+			ie.clusterScopedResourceFilter.Excludes(resolvedItem)
+		}
+	}
+	for _, item := range policy.IncludedClusterScopedResources {
+		resolvedItem := mapFunc(item, false)
+		if resolvedItem == "" {
+			continue
+		}
+		// The existing includeExcludes in the struct has higher priority, therefore, we should only add the item to the filter
+		// when the struct does not exclude this item and this item is not yet in the includes filter.
+		if !ie.clusterScopedResourceFilter.includes.match(resolvedItem) &&
+			!ie.clusterScopedResourceFilter.excludes.match(resolvedItem) {
+			ie.clusterScopedResourceFilter.Includes(resolvedItem)
+		}
+	}
+	ie.logger.Infof("Scoped resource includes/excludes after combining with resource policy")
+	ie.logger.Infof("Including namespace-scoped resources: %s", ie.namespaceScopedResourceFilter.IncludesString())
+	ie.logger.Infof("Excluding namespace-scoped resources: %s", ie.namespaceScopedResourceFilter.ExcludesString())
+	ie.logger.Infof("Including cluster-scoped resources: %s", ie.clusterScopedResourceFilter.GetIncludes())
+	ie.logger.Infof("Excluding cluster-scoped resources: %s", ie.clusterScopedResourceFilter.ExcludesString())
 }
 
-func newScopeIncludesExcludes(nsIncludesExcludes IncludesExcludes, helper discovery.Helper, logger logrus.FieldLogger) *ScopeIncludesExcludes {
+func newScopeIncludesExcludes(nsIncludesExcludes NamespaceIncludesExcludes, helper discovery.Helper, logger logrus.FieldLogger) *ScopeIncludesExcludes {
 	ret := &ScopeIncludesExcludes{
 		namespaceScopedResourceFilter: IncludesExcludes{
 			includes: newGlobStringSet(),
@@ -302,6 +540,43 @@ func newScopeIncludesExcludes(nsIncludesExcludes IncludesExcludes, helper discov
 	}
 
 	return ret
 }
 
+// GetScopeResourceIncludesExcludes function is similar to GetResourceIncludesExcludes,
+// but it's used for scoped Includes/Excludes, and can handle both cluster-scoped and namespace-scoped resources.
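// Illustrative sketch (the resource name and the scopeIE variable are assumptions):
// the spec's own filters take precedence over a resource policy; a policy entry is
// only added when the spec neither includes nor excludes that item.
//
//	policy := &resourcepolicies.IncludeExcludePolicy{
//		ExcludedNamespaceScopedResources: []string{"secrets"},
//	}
//	scopeIE.CombineWithPolicy(policy)
//	// If the spec already includes "secrets", the policy exclude is dropped;
//	// otherwise "secrets" is appended to the namespace-scoped excludes.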
+func GetScopeResourceIncludesExcludes(helper discovery.Helper, logger logrus.FieldLogger, namespaceIncludes, namespaceExcludes, clusterIncludes, clusterExcludes []string, nsIncludesExcludes NamespaceIncludesExcludes) *ScopeIncludesExcludes { + ret := generateScopedIncludesExcludes( + namespaceIncludes, + namespaceExcludes, + clusterIncludes, + clusterExcludes, + scopeResourceMapFunc(helper), + nsIncludesExcludes, + helper, + logger, + ) + logger.Infof("Scoped resource includes/excludes after initialization") + logger.Infof("Including namespace-scoped resources: %s", ret.namespaceScopedResourceFilter.IncludesString()) + logger.Infof("Excluding namespace-scoped resources: %s", ret.namespaceScopedResourceFilter.ExcludesString()) + logger.Infof("Including cluster-scoped resources: %s", ret.clusterScopedResourceFilter.GetIncludes()) + logger.Infof("Excluding cluster-scoped resources: %s", ret.clusterScopedResourceFilter.ExcludesString()) + + return ret +} + +func scopeResourceMapFunc(helper discovery.Helper) func(string, bool) string { + return func(item string, namespaced bool) string { + gvr, resource, err := helper.ResourceFor(schema.ParseGroupResource(item).WithVersion("")) + if err != nil { + return item + } + if resource.Namespaced != namespaced { + return "" + } + + gr := gvr.GroupResource() + return gr.String() + } +} + // ValidateIncludesExcludes checks provided lists of included and excluded // items to ensure they are a valid set of IncludesExcludes data. func ValidateIncludesExcludes(includesList, excludesList []string) []error { @@ -391,10 +666,22 @@ func validateNamespaceName(ns string) []error { return nil } - // Kubernetes does not allow asterisks in namespaces but Velero uses them as - // wildcards. Replace asterisks with an arbitrary letter to pass Kubernetes - // validation. - tmpNamespace := strings.ReplaceAll(ns, "*", "x") + // Validate the namespace name to ensure it is a valid wildcard pattern + if err := wildcard.ValidateNamespaceName(ns); err != nil { + return []error{err} + } + + // Kubernetes does not allow wildcard characters in namespaces but Velero uses them + // for glob patterns. Replace wildcard characters with valid characters to pass + // Kubernetes validation. + tmpNamespace := ns + + // Replace glob wildcard characters with valid alphanumeric characters + // Note: Validation of wildcard patterns is handled by the wildcard package. + tmpNamespace = strings.ReplaceAll(tmpNamespace, "*", "x") // matches any sequence + tmpNamespace = strings.ReplaceAll(tmpNamespace, "?", "x") // matches single character + tmpNamespace = strings.ReplaceAll(tmpNamespace, "[", "x") // character class start + tmpNamespace = strings.ReplaceAll(tmpNamespace, "]", "x") // character class end if errMsgs := validation.ValidateNamespaceName(tmpNamespace, false); errMsgs != nil { for _, msg := range errMsgs { @@ -444,7 +731,7 @@ func generateIncludesExcludes(includes, excludes []string, mapFunc func(string) // generateScopedIncludesExcludes function is similar with generateIncludesExcludes, // but it's used for scoped Includes/Excludes. 
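// Illustrative sketch (the helper variable and resource names are assumptions):
// scopeResourceMapFunc resolves a short resource name to its fully-qualified
// group-resource and enforces scope, dropping items whose scope does not match.
//
//	mapFunc := scopeResourceMapFunc(helper) // helper is a discovery.Helper
//	mapFunc("deployments", true)  // "deployments.apps": namespace-scoped match
//	mapFunc("deployments", false) // "": scope mismatch, the item is dropped
//	mapFunc("unknown", true)      // "unknown": unresolvable items pass through as-is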
-func generateScopedIncludesExcludes(namespacedIncludes, namespacedExcludes, clusterIncludes, clusterExcludes []string, mapFunc func(string, bool) string, nsIncludesExcludes IncludesExcludes, helper discovery.Helper, logger logrus.FieldLogger) *ScopeIncludesExcludes { +func generateScopedIncludesExcludes(namespacedIncludes, namespacedExcludes, clusterIncludes, clusterExcludes []string, mapFunc func(string, bool) string, nsIncludesExcludes NamespaceIncludesExcludes, helper discovery.Helper, logger logrus.FieldLogger) *ScopeIncludesExcludes { res := newScopeIncludesExcludes(nsIncludesExcludes, helper, logger) generateFilter(res.namespaceScopedResourceFilter.includes, namespacedIncludes, mapFunc, true) @@ -470,97 +757,16 @@ func generateFilter(filter globStringSet, resources []string, mapFunc func(strin } } -// GetResourceIncludesExcludes takes the lists of resources to include and exclude, uses the -// discovery helper to resolve them to fully-qualified group-resource names, and returns an -// IncludesExcludes list. -func GetResourceIncludesExcludes(helper discovery.Helper, includes, excludes []string) *IncludesExcludes { - resources := generateIncludesExcludes( - includes, - excludes, - func(item string) string { - gvr, _, err := helper.ResourceFor(schema.ParseGroupResource(item).WithVersion("")) - if err != nil { - // If we can't resolve it, return it as-is. This prevents the generated - // includes-excludes list from including *everything*, if none of the includes - // can be resolved. ref. https://github.com/vmware-tanzu/velero/issues/2461 - return item - } - - gr := gvr.GroupResource() - return gr.String() - }, - ) - - return resources -} - -func GetGlobalResourceIncludesExcludes(helper discovery.Helper, logger logrus.FieldLogger, includes, excludes []string, includeClusterResources *bool, nsIncludesExcludes IncludesExcludes) *GlobalIncludesExcludes { - ret := &GlobalIncludesExcludes{ - resourceFilter: *GetResourceIncludesExcludes(helper, includes, excludes), - includeClusterResources: includeClusterResources, - namespaceFilter: nsIncludesExcludes, - helper: helper, - logger: logger, - } - - logger.Infof("Including resources: %s", ret.resourceFilter.IncludesString()) - logger.Infof("Excluding resources: %s", ret.resourceFilter.ExcludesString()) - return ret -} - -// GetScopeResourceIncludesExcludes function is similar with GetResourceIncludesExcludes, -// but it's used for scoped Includes/Excludes, and can handle both cluster-scoped and namespace-scoped resources. 
-func GetScopeResourceIncludesExcludes(helper discovery.Helper, logger logrus.FieldLogger, namespaceIncludes, namespaceExcludes, clusterIncludes, clusterExcludes []string, nsIncludesExcludes IncludesExcludes) *ScopeIncludesExcludes { - ret := generateScopedIncludesExcludes( - namespaceIncludes, - namespaceExcludes, - clusterIncludes, - clusterExcludes, - func(item string, namespaced bool) string { - gvr, resource, err := helper.ResourceFor(schema.ParseGroupResource(item).WithVersion("")) - if err != nil { - return item - } - if resource.Namespaced != namespaced { - return "" - } - - gr := gvr.GroupResource() - return gr.String() - }, - nsIncludesExcludes, - helper, - logger, - ) - logger.Infof("Including namespace-scoped resources: %s", ret.namespaceScopedResourceFilter.IncludesString()) - logger.Infof("Excluding namespace-scoped resources: %s", ret.namespaceScopedResourceFilter.ExcludesString()) - logger.Infof("Including cluster-scoped resources: %s", ret.clusterScopedResourceFilter.GetIncludes()) - logger.Infof("Excluding cluster-scoped resources: %s", ret.clusterScopedResourceFilter.ExcludesString()) - - return ret -} - // UseOldResourceFilters checks whether to use old resource filters (IncludeClusterResources, // IncludedResources and ExcludedResources), depending the backup's filters setting. // New filters are IncludedClusterScopedResources, ExcludedClusterScopedResources, // IncludedNamespaceScopedResources and ExcludedNamespaceScopedResources. +// If all resource filters are none, it is treated as using new parameter filters. func UseOldResourceFilters(backupSpec velerov1api.BackupSpec) bool { - // If all resource filters are none, it is treated as using old parameter filters. - if backupSpec.IncludeClusterResources == nil && - len(backupSpec.IncludedResources) == 0 && - len(backupSpec.ExcludedResources) == 0 && - len(backupSpec.IncludedClusterScopedResources) == 0 && - len(backupSpec.ExcludedClusterScopedResources) == 0 && - len(backupSpec.IncludedNamespaceScopedResources) == 0 && - len(backupSpec.ExcludedNamespaceScopedResources) == 0 { - return true - } - if backupSpec.IncludeClusterResources != nil || len(backupSpec.IncludedResources) > 0 || len(backupSpec.ExcludedResources) > 0 { return true } - return false } diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/event.go b/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/event.go index 69632bd12..ae514f4da 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/event.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/event.go @@ -16,13 +16,14 @@ limitations under the License. package kube import ( + "context" "math" "sync" "time" "github.com/google/uuid" "github.com/sirupsen/logrus" - v1 "k8s.io/api/core/v1" + corev1api "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes" typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" @@ -65,7 +66,7 @@ func NewEventRecorder(kubeClient kubernetes.Interface, scheme *runtime.Scheme, e // The callers (i.e., data mover pods) have controlled the rate and total number outside. E.g., the progress is designed to be updated every 10 seconds and is changeable. 
BurstSize: math.MaxInt32, MaxEvents: 1, - MessageFunc: func(event *v1.Event) string { + MessageFunc: func(event *corev1api.Event) string { return event.Message }, }) @@ -75,7 +76,7 @@ func NewEventRecorder(kubeClient kubernetes.Interface, scheme *runtime.Scheme, e sink: kubeClient.CoreV1().Events(""), }) - res.recorder = res.broadcaster.NewRecorder(scheme, v1.EventSource{ + res.recorder = res.broadcaster.NewRecorder(scheme, corev1api.EventSource{ Component: eventSource, Host: eventNode, }) @@ -88,9 +89,9 @@ func (er *eventRecorder) Event(object runtime.Object, warning bool, reason strin return } - eventType := v1.EventTypeNormal + eventType := corev1api.EventTypeNormal if warning { - eventType = v1.EventTypeWarning + eventType = corev1api.EventTypeWarning } if len(a) > 0 { @@ -113,7 +114,7 @@ func (er *eventRecorder) EndingEvent(object runtime.Object, warning bool, reason if er.endingSentinel == nil { sentinelEvent = uuid.NewString() er.endingSentinel = &eventElement{ - t: v1.EventTypeNormal, + t: corev1api.EventTypeNormal, r: sentinelEvent, m: sentinelEvent, sinked: make(chan struct{}), @@ -161,7 +162,7 @@ func (er *eventRecorder) Shutdown() { er.lock.Unlock() } -func (er *eventRecorder) sentinelWatch(event *v1.Event) bool { +func (er *eventRecorder) sentinelWatch(event *corev1api.Event) bool { er.lock.Lock() defer er.lock.Unlock() @@ -177,18 +178,18 @@ func (er *eventRecorder) sentinelWatch(event *v1.Event) bool { return false } -func (es *eventSink) Create(event *v1.Event) (*v1.Event, error) { +func (es *eventSink) Create(event *corev1api.Event) (*corev1api.Event, error) { if es.recorder.sentinelWatch(event) { return event, nil } - return es.sink.CreateWithEventNamespace(event) + return es.sink.CreateWithEventNamespaceWithContext(context.Background(), event) } -func (es *eventSink) Update(event *v1.Event) (*v1.Event, error) { - return es.sink.UpdateWithEventNamespace(event) +func (es *eventSink) Update(event *corev1api.Event) (*corev1api.Event, error) { + return es.sink.UpdateWithEventNamespaceWithContext(context.Background(), event) } -func (es *eventSink) Patch(event *v1.Event, data []byte) (*v1.Event, error) { - return es.sink.PatchWithEventNamespace(event, data) +func (es *eventSink) Patch(event *corev1api.Event, data []byte) (*corev1api.Event, error) { + return es.sink.PatchWithEventNamespaceWithContext(context.Background(), event, data) } diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/node.go b/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/node.go index da68183a5..ba6853624 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/node.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/node.go @@ -17,7 +17,6 @@ package kube import ( "context" - "fmt" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -34,6 +33,11 @@ const ( NodeOSLabel = "kubernetes.io/os" ) +var realNodeOSMap = map[string]string{ + "linux": NodeOSLinux, + "windows": NodeOSWindows, +} + func IsLinuxNode(ctx context.Context, nodeName string, client client.Client) error { node := &corev1api.Node{} if err := client.Get(ctx, types.NamespacedName{Name: nodeName}, node); err != nil { @@ -41,12 +45,11 @@ func IsLinuxNode(ctx context.Context, nodeName string, client client.Client) err } os, found := node.Labels[NodeOSLabel] - if !found { return errors.Errorf("no os type label for node %s", nodeName) } - if os != NodeOSLinux { + if getRealOS(os) != NodeOSLinux { return errors.Errorf("os type %s for node %s is not linux", os, nodeName) } @@ -72,7 +75,7 @@ func withOSNode(ctx 
context.Context, client client.Client, osType string, log lo
 	for _, node := range nodeList.Items {
 		os, found := node.Labels[NodeOSLabel]
-		if os == osType {
+		if getRealOS(os) == osType {
 			return true
 		}
 
@@ -98,7 +101,7 @@ func GetNodeOS(ctx context.Context, nodeName string, nodeClient corev1client.Cor
 		return "", nil
 	}
 
-	return node.Labels[NodeOSLabel], nil
+	return getRealOS(node.Labels[NodeOSLabel]), nil
 }
 
 func HasNodeWithOS(ctx context.Context, os string, nodeClient corev1client.CoreV1Interface) error {
@@ -106,14 +109,29 @@ func HasNodeWithOS(ctx context.Context, os string, nodeClient corev1client.CoreV
 		return errors.New("invalid node OS")
 	}
 
-	nodes, err := nodeClient.Nodes().List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", NodeOSLabel, os)})
+	nodes, err := nodeClient.Nodes().List(ctx, metav1.ListOptions{})
 	if err != nil {
 		return errors.Wrapf(err, "error listing nodes with OS %s", os)
 	}
 
-	if len(nodes.Items) == 0 {
-		return errors.Errorf("node with OS %s doesn't exist", os)
+	for _, node := range nodes.Items {
+		osLabel, found := node.Labels[NodeOSLabel]
+		if !found {
+			continue
+		}
+
+		if getRealOS(osLabel) == os {
+			return nil
+		}
 	}
 
-	return nil
+	return errors.Errorf("node with OS %s doesn't exist", os)
+}
+
+func getRealOS(osLabel string) string {
+	if os, found := realNodeOSMap[osLabel]; !found {
+		return NodeOSLinux
+	} else {
+		return os
+	}
+}
diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/pod.go b/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/pod.go
index 04457f0d4..4ff05b43e 100644
--- a/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/pod.go
+++ b/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/pod.go
@@ -19,6 +19,7 @@ import (
 	"context"
 	"fmt"
 	"io"
+	"os"
 	"time"
 
 	"github.com/pkg/errors"
@@ -33,13 +34,18 @@ import (
 type LoadAffinity struct {
 	// NodeSelector specifies the label selector to match nodes
 	NodeSelector metav1.LabelSelector `json:"nodeSelector"`
+
+	// StorageClass specifies which VGDPs the LoadAffinity applies to. If StorageClass has no value,
+	// the affinity applies to all VGDPs; otherwise it applies only to the VGDPs that use this StorageClass.
+	StorageClass string `json:"storageClass"`
 }
 
 type PodResources struct {
-	CPURequest    string `json:"cpuRequest,omitempty"`
-	MemoryRequest string `json:"memoryRequest,omitempty"`
-	CPULimit      string `json:"cpuLimit,omitempty"`
-	MemoryLimit   string `json:"memoryLimit,omitempty"`
+	CPURequest              string `json:"cpuRequest,omitempty"`
+	CPULimit                string `json:"cpuLimit,omitempty"`
+	MemoryRequest           string `json:"memoryRequest,omitempty"`
+	MemoryLimit             string `json:"memoryLimit,omitempty"`
+	EphemeralStorageRequest string `json:"ephemeralStorageRequest,omitempty"`
+	EphemeralStorageLimit   string `json:"ephemeralStorageLimit,omitempty"`
 }
 
 // IsPodRunning does a well-rounded check to make sure the specified pod is running stably.
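// Illustrative note (getRealOS is unexported; behavior summarized here, with the
// node name and client as assumptions): OS labels are now normalized, so unknown
// or missing values fall back to linux.
//
//	os, _ := kube.GetNodeOS(ctx, "worker-0", nodeClient)
//	// label "windows" -> "windows"; "linux", "", or any other value -> "linux"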
@@ -136,7 +142,13 @@ func EnsureDeletePod(ctx context.Context, podGetter corev1client.CoreV1Interface func IsPodUnrecoverable(pod *corev1api.Pod, log logrus.FieldLogger) (bool, string) { // Check the Phase field if pod.Status.Phase == corev1api.PodFailed || pod.Status.Phase == corev1api.PodUnknown { - message := GetPodTerminateMessage(pod) + message := "" + if pod.Status.Message != "" { + message += pod.Status.Message + "/" + } + + message += GetPodTerminateMessage(pod) + log.Warnf("Pod is in abnormal state %s, message [%s]", pod.Status.Phase, message) return true, fmt.Sprintf("Pod is in abnormal state [%s], message [%s]", pod.Status.Phase, message) } @@ -220,14 +232,9 @@ func CollectPodLogs(ctx context.Context, podGetter corev1client.CoreV1Interface, return nil } -func ToSystemAffinity(loadAffinities []*LoadAffinity) *corev1api.Affinity { - if len(loadAffinities) == 0 { - return nil - } - nodeSelectorTermList := make([]corev1api.NodeSelectorTerm, 0) - - for _, loadAffinity := range loadAffinities { - requirements := []corev1api.NodeSelectorRequirement{} +func ToSystemAffinity(loadAffinity *LoadAffinity, volumeTopology *corev1api.NodeSelector) *corev1api.Affinity { + requirements := []corev1api.NodeSelectorRequirement{} + if loadAffinity != nil { for k, v := range loadAffinity.NodeSelector.MatchLabels { requirements = append(requirements, corev1api.NodeSelectorRequirement{ Key: k, @@ -243,33 +250,98 @@ func ToSystemAffinity(loadAffinities []*LoadAffinity) *corev1api.Affinity { Operator: corev1api.NodeSelectorOperator(exp.Operator), }) } - - nodeSelectorTermList = append( - nodeSelectorTermList, - corev1api.NodeSelectorTerm{ - MatchExpressions: requirements, - }, - ) } - if len(nodeSelectorTermList) > 0 { - result := new(corev1api.Affinity) - result.NodeAffinity = new(corev1api.NodeAffinity) - result.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = new(corev1api.NodeSelector) - result.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = nodeSelectorTermList + result := new(corev1api.Affinity) + result.NodeAffinity = new(corev1api.NodeAffinity) + result.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = new(corev1api.NodeSelector) + + if volumeTopology != nil { + result.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = append(result.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, volumeTopology.NodeSelectorTerms...) + } else if len(requirements) > 0 { + result.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = make([]corev1api.NodeSelectorTerm, 1) + } else { + return nil + } - return result + for i := range result.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms { + result.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[i].MatchExpressions = append(result.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[i].MatchExpressions, requirements...) 
+	}
 
-	return nil
+	return result
 }
 
-func DiagnosePod(pod *corev1api.Pod) string {
-	diag := fmt.Sprintf("Pod %s/%s, phase %s, node name %s\n", pod.Namespace, pod.Name, pod.Status.Phase, pod.Spec.NodeName)
+func DiagnosePod(pod *corev1api.Pod, events *corev1api.EventList) string {
+	diag := fmt.Sprintf("Pod %s/%s, phase %s, node name %s, message %s\n", pod.Namespace, pod.Name, pod.Status.Phase, pod.Spec.NodeName, pod.Status.Message)
 
 	for _, condition := range pod.Status.Conditions {
 		diag += fmt.Sprintf("Pod condition %s, status %s, reason %s, message %s\n", condition.Type, condition.Status, condition.Reason, condition.Message)
 	}
 
+	if events != nil {
+		for _, e := range events.Items {
+			if e.InvolvedObject.UID == pod.UID && e.Type == corev1api.EventTypeWarning {
+				diag += fmt.Sprintf("Pod event reason %s, message %s\n", e.Reason, e.Message)
+			}
+		}
+	}
+
 	return diag
 }
+
+var funcExit = os.Exit
+var funcCreateFile = os.Create
+
+func ExitPodWithMessage(logger logrus.FieldLogger, succeed bool, message string, a ...any) {
+	exitCode := 0
+	if !succeed {
+		exitCode = 1
+	}
+
+	toWrite := fmt.Sprintf(message, a...)
+
+	podFile, err := funcCreateFile("/dev/termination-log")
+	if err != nil {
+		logger.WithError(err).Error("Failed to create termination log file")
+		exitCode = 1
+	} else {
+		if _, err := podFile.WriteString(toWrite); err != nil {
+			logger.WithError(err).Error("Failed to write error to termination log file")
+			exitCode = 1
+		}
+
+		podFile.Close()
+	}
+
+	funcExit(exitCode)
+}
+
+// GetLoadAffinityByStorageClass retrieves the LoadAffinity from the parameter affinityList.
+// The function first tries to find one matching scName. If there is no such LoadAffinity,
+// it falls back to the LoadAffinity whose StorageClass has no value.
+func GetLoadAffinityByStorageClass(
+	affinityList []*LoadAffinity,
+	scName string,
+	logger logrus.FieldLogger,
+) *LoadAffinity {
+	var globalAffinity *LoadAffinity
+
+	for _, affinity := range affinityList {
+		if affinity.StorageClass == scName {
+			logger.WithField("StorageClass", scName).Info("Found pod's affinity setting per StorageClass.")
+			return affinity
+		}
+
+		if affinity.StorageClass == "" && globalAffinity == nil {
+			globalAffinity = affinity
+		}
+	}
+
+	if globalAffinity != nil {
+		logger.Info("Use the Global affinity for pod.")
+	} else {
+		logger.Info("No Affinity is found for pod.")
+	}
+
+	return globalAffinity
+}
diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/predicate.go b/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/predicate.go
index 9ac9d9894..ce5bb85f0 100644
--- a/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/predicate.go
+++ b/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/predicate.go
@@ -47,15 +47,6 @@ func (SpecChangePredicate) Update(e event.UpdateEvent) bool {
 	return !reflect.DeepEqual(oldSpec.Interface(), newSpec.Interface())
 }
 
-// NewGenericEventPredicate creates a new Predicate that checks the Generic event with the provided func
-func NewGenericEventPredicate(f func(object client.Object) bool) predicate.Predicate {
-	return predicate.Funcs{
-		GenericFunc: func(event event.GenericEvent) bool {
-			return f(event.Object)
-		},
-	}
-}
-
 // NewAllEventPredicate creates a new Predicate that checks all the events with the provided func
 func NewAllEventPredicate(f func(object client.Object) bool) predicate.Predicate {
 	return predicate.Funcs{
@@ -74,25 +65,6 @@ func NewAllEventPredicate(f func(object client.Object) bool) predicate.Predicate
 	}
 }
 
-// NewUpdateEventPredicate creates a new Predicate that checks
the update events with the provided func -// and ignore others -func NewUpdateEventPredicate(f func(client.Object, client.Object) bool) predicate.Predicate { - return predicate.Funcs{ - UpdateFunc: func(event event.UpdateEvent) bool { - return f(event.ObjectOld, event.ObjectNew) - }, - CreateFunc: func(event event.CreateEvent) bool { - return false - }, - DeleteFunc: func(event event.DeleteEvent) bool { - return false - }, - GenericFunc: func(event event.GenericEvent) bool { - return false - }, - } -} - // FalsePredicate always returns false for all kinds of events type FalsePredicate struct{} @@ -116,19 +88,32 @@ func (f FalsePredicate) Generic(event.GenericEvent) bool { return false } -func NewCreateEventPredicate(f func(client.Object) bool) predicate.Predicate { +// NewGenericEventPredicate creates a new Predicate that checks the Generic event with the provided func +func NewGenericEventPredicate(f func(object client.Object) bool) predicate.Predicate { return predicate.Funcs{ - CreateFunc: func(event event.CreateEvent) bool { - return f(event.Object) - }, - DeleteFunc: func(event event.DeleteEvent) bool { - return false - }, GenericFunc: func(event event.GenericEvent) bool { - return false + return f(event.Object) }, + } +} + +// NewUpdateEventPredicate creates a new Predicate that checks the Update event with the provided func +func NewUpdateEventPredicate( + f func(objectOld client.Object, objectNew client.Object) bool, +) predicate.Predicate { + return predicate.Funcs{ UpdateFunc: func(event event.UpdateEvent) bool { - return false + return f(event.ObjectOld, event.ObjectNew) + }, + } +} + +func NewCreateEventPredicate( + f func(object client.Object) bool, +) predicate.Predicate { + return predicate.Funcs{ + CreateFunc: func(event event.CreateEvent) bool { + return f(event.Object) }, } } diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/priority_class.go b/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/priority_class.go new file mode 100644 index 000000000..3f2ddc256 --- /dev/null +++ b/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/priority_class.go @@ -0,0 +1,64 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kube + +import ( + "context" + + "github.com/sirupsen/logrus" + schedulingv1 "k8s.io/api/scheduling/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ValidatePriorityClass checks if the specified priority class exists in the cluster +// Returns true if the priority class exists or if priorityClassName is empty +// Returns false if the priority class doesn't exist or validation fails +// Logs warnings when the priority class doesn't exist +func ValidatePriorityClass(ctx context.Context, kubeClient kubernetes.Interface, priorityClassName string, logger logrus.FieldLogger) bool { + if priorityClassName == "" { + return true + } + + _, err := kubeClient.SchedulingV1().PriorityClasses().Get(ctx, priorityClassName, metav1.GetOptions{}) + if err != nil { + if apierrors.IsNotFound(err) { + logger.Warnf("Priority class %q not found in cluster. Pod creation may fail if the priority class doesn't exist when pods are scheduled.", priorityClassName) + } else { + logger.WithError(err).Warnf("Failed to validate priority class %q", priorityClassName) + } + return false + } + logger.Infof("Validated priority class %q exists in cluster", priorityClassName) + return true +} + +// ValidatePriorityClassWithClient checks if the specified priority class exists in the cluster using controller-runtime client +// Returns nil if the priority class exists or if priorityClassName is empty +// Returns error if the priority class doesn't exist or validation fails +func ValidatePriorityClassWithClient(ctx context.Context, cli client.Client, priorityClassName string) error { + if priorityClassName == "" { + return nil + } + + priorityClass := &schedulingv1.PriorityClass{} + err := cli.Get(ctx, types.NamespacedName{Name: priorityClassName}, priorityClass) + return err +} diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/pvc_pv.go b/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/pvc_pv.go index 3ccba32ae..d5d2e2041 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/pvc_pv.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/pvc_pv.go @@ -305,8 +305,14 @@ func SetPVReclaimPolicy(ctx context.Context, pvGetter corev1client.CoreV1Interfa // WaitPVCConsumed waits for a PVC to be consumed by a pod so that the selected node is set by the pod scheduling; or does // nothing if the consuming doesn't affect the PV provision. // The latest PVC and the selected node will be returned. -func WaitPVCConsumed(ctx context.Context, pvcGetter corev1client.CoreV1Interface, pvc string, namespace string, - storageClient storagev1.StorageV1Interface, timeout time.Duration, ignoreConsume bool) (string, *corev1api.PersistentVolumeClaim, error) { +func WaitPVCConsumed( + ctx context.Context, + pvcGetter corev1client.CoreV1Interface, + pvc string, namespace string, + storageClient storagev1.StorageV1Interface, + timeout time.Duration, + ignoreConsume bool, +) (string, *corev1api.PersistentVolumeClaim, error) { selectedNode := "" var updated *corev1api.PersistentVolumeClaim var storageClass *storagev1api.StorageClass @@ -411,19 +417,19 @@ func MakePodPVCAttachment(volumeName string, volumeMode *corev1api.PersistentVol return volumeMounts, volumeDevices, volumePath } +// GetPVForPVC returns the PersistentVolume backing a PVC +// returns PV, error. 
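The new priority_class.go above offers two validation flavors: a warn-and-return-bool form for the typed clientset, and an error-returning form for controller-runtime clients. A calling sketch against a fake clientset (the `backup-critical` class name and the `example` package are invented for illustration):

```go
package example

import (
	"context"

	"github.com/sirupsen/logrus"
	schedulingv1 "k8s.io/api/scheduling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"

	"github.com/vmware-tanzu/velero/pkg/util/kube"
)

func validateExample() bool {
	pc := &schedulingv1.PriorityClass{
		ObjectMeta: metav1.ObjectMeta{Name: "backup-critical"},
		Value:      1000,
	}
	kubeClient := fake.NewSimpleClientset(pc)

	// true: the class exists in the (fake) cluster. An empty name would
	// also return true, since no priority class was requested at all.
	return kube.ValidatePriorityClass(
		context.Background(), kubeClient, "backup-critical", logrus.New())
}
```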
+// PV will be nil on error func GetPVForPVC( pvc *corev1api.PersistentVolumeClaim, crClient crclient.Client, ) (*corev1api.PersistentVolume, error) { if pvc.Spec.VolumeName == "" { - return nil, errors.Errorf("PVC %s/%s has no volume backing this claim", - pvc.Namespace, pvc.Name) + return nil, errors.Errorf("PVC %s/%s has no volume backing this claim", pvc.Namespace, pvc.Name) } if pvc.Status.Phase != corev1api.ClaimBound { - // TODO: confirm if this PVC should be snapshotted if it has no PV bound - return nil, - errors.Errorf("PVC %s/%s is in phase %v and is not bound to a volume", - pvc.Namespace, pvc.Name, pvc.Status.Phase) + return nil, errors.Errorf("PVC %s/%s is in phase %v and is not bound to a volume", + pvc.Namespace, pvc.Name, pvc.Status.Phase) } pv := &corev1api.PersistentVolume{} @@ -457,8 +463,18 @@ func GetPVCForPodVolume(vol *corev1api.Volume, pod *corev1api.Pod, crClient crcl return pvc, nil } -func DiagnosePVC(pvc *corev1api.PersistentVolumeClaim) string { - return fmt.Sprintf("PVC %s/%s, phase %s, binding to %s\n", pvc.Namespace, pvc.Name, pvc.Status.Phase, pvc.Spec.VolumeName) +func DiagnosePVC(pvc *corev1api.PersistentVolumeClaim, events *corev1api.EventList) string { + diag := fmt.Sprintf("PVC %s/%s, phase %s, binding to %s\n", pvc.Namespace, pvc.Name, pvc.Status.Phase, pvc.Spec.VolumeName) + + if events != nil { + for _, e := range events.Items { + if e.InvolvedObject.UID == pvc.UID && e.Type == corev1api.EventTypeWarning { + diag += fmt.Sprintf("PVC event reason %s, message %s\n", e.Reason, e.Message) + } + } + } + + return diag } func DiagnosePV(pv *corev1api.PersistentVolume) string { @@ -548,3 +564,45 @@ func GetPVAttachedNode(ctx context.Context, pv string, storageClient storagev1.S return "", nil } + +func GetPVAttachedNodes(ctx context.Context, pv string, storageClient storagev1.StorageV1Interface) ([]string, error) { + vaList, err := storageClient.VolumeAttachments().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, errors.Wrapf(err, "error listing volumeattachment") + } + + nodes := []string{} + for _, va := range vaList.Items { + if va.Spec.Source.PersistentVolumeName != nil && *va.Spec.Source.PersistentVolumeName == pv { + nodes = append(nodes, va.Spec.NodeName) + } + } + + return nodes, nil +} + +func GetVolumeTopology(ctx context.Context, volumeClient corev1client.CoreV1Interface, storageClient storagev1.StorageV1Interface, pvName string, scName string) (*corev1api.NodeSelector, error) { + if pvName == "" || scName == "" { + return nil, errors.Errorf("invalid parameter, pv %s, sc %s", pvName, scName) + } + + sc, err := storageClient.StorageClasses().Get(ctx, scName, metav1.GetOptions{}) + if err != nil { + return nil, errors.Wrapf(err, "error getting storage class %s", scName) + } + + if sc.VolumeBindingMode == nil || *sc.VolumeBindingMode != storagev1api.VolumeBindingWaitForFirstConsumer { + return nil, nil + } + + pv, err := volumeClient.PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{}) + if err != nil { + return nil, errors.Wrapf(err, "error getting PV %s", pvName) + } + + if pv.Spec.NodeAffinity == nil { + return nil, nil + } + + return pv.Spec.NodeAffinity.Required, nil +} diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/resource_requirements.go b/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/resource_requirements.go index a1974516c..12cf7a79c 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/resource_requirements.go +++ 
b/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/resource_requirements.go @@ -18,17 +18,39 @@ package kube import ( "github.com/pkg/errors" - corev1 "k8s.io/api/core/v1" + corev1api "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" + + "github.com/vmware-tanzu/velero/pkg/constant" ) -// ParseResourceRequirements takes a set of CPU and memory requests and limit string +// ParseCPUAndMemoryResources is a helper function that parses CPU and memory requests and limits, +// using default values for ephemeral storage. +func ParseCPUAndMemoryResources(cpuRequest, memRequest, cpuLimit, memLimit string) (corev1api.ResourceRequirements, error) { + return ParseResourceRequirements( + cpuRequest, + memRequest, + constant.DefaultEphemeralStorageRequest, + cpuLimit, + memLimit, + constant.DefaultEphemeralStorageLimit, + ) +} + +// ParseResourceRequirements takes a set of CPU, memory, ephemeral storage requests and limit string // values and returns a ResourceRequirements struct to be used in a Container. // An error is returned if we cannot parse the request/limit. -func ParseResourceRequirements(cpuRequest, memRequest, cpuLimit, memLimit string) (corev1.ResourceRequirements, error) { - resources := corev1.ResourceRequirements{ - Requests: corev1.ResourceList{}, - Limits: corev1.ResourceList{}, +func ParseResourceRequirements( + cpuRequest, + memRequest, + ephemeralStorageRequest, + cpuLimit, + memLimit, + ephemeralStorageLimit string, +) (corev1api.ResourceRequirements, error) { + resources := corev1api.ResourceRequirements{ + Requests: corev1api.ResourceList{}, + Limits: corev1api.ResourceList{}, } parsedCPURequest, err := resource.ParseQuantity(cpuRequest) @@ -41,6 +63,11 @@ func ParseResourceRequirements(cpuRequest, memRequest, cpuLimit, memLimit string return resources, errors.Wrapf(err, `couldn't parse memory request "%s"`, memRequest) } + parsedEphemeralStorageRequest, err := resource.ParseQuantity(ephemeralStorageRequest) + if err != nil { + return resources, errors.Wrapf(err, `couldn't parse ephemeral storage request "%s"`, ephemeralStorageRequest) + } + parsedCPULimit, err := resource.ParseQuantity(cpuLimit) if err != nil { return resources, errors.Wrapf(err, `couldn't parse CPU limit "%s"`, cpuLimit) @@ -51,6 +78,11 @@ func ParseResourceRequirements(cpuRequest, memRequest, cpuLimit, memLimit string return resources, errors.Wrapf(err, `couldn't parse memory limit "%s"`, memLimit) } + parsedEphemeralStorageLimit, err := resource.ParseQuantity(ephemeralStorageLimit) + if err != nil { + return resources, errors.Wrapf(err, `couldn't parse ephemeral storage limit "%s"`, ephemeralStorageLimit) + } + // A quantity of 0 is treated as unbounded unbounded := resource.MustParse("0") @@ -62,18 +94,28 @@ func ParseResourceRequirements(cpuRequest, memRequest, cpuLimit, memLimit string return resources, errors.WithStack(errors.Errorf(`Memory request "%s" must be less than or equal to Memory limit "%s"`, memRequest, memLimit)) } + if parsedEphemeralStorageLimit != unbounded && parsedEphemeralStorageRequest.Cmp(parsedEphemeralStorageLimit) > 0 { + return resources, errors.WithStack(errors.Errorf(`Ephemeral storage request "%s" must be less than or equal to Ephemeral storage limit "%s"`, ephemeralStorageRequest, ephemeralStorageLimit)) + } + // Only set resources if they are not unbounded if parsedCPURequest != unbounded { - resources.Requests[corev1.ResourceCPU] = parsedCPURequest + resources.Requests[corev1api.ResourceCPU] = parsedCPURequest } if parsedMemRequest != unbounded { - 
resources.Requests[corev1.ResourceMemory] = parsedMemRequest + resources.Requests[corev1api.ResourceMemory] = parsedMemRequest + } + if parsedEphemeralStorageRequest != unbounded { + resources.Requests[corev1api.ResourceEphemeralStorage] = parsedEphemeralStorageRequest } if parsedCPULimit != unbounded { - resources.Limits[corev1.ResourceCPU] = parsedCPULimit + resources.Limits[corev1api.ResourceCPU] = parsedCPULimit } if parsedMemLimit != unbounded { - resources.Limits[corev1.ResourceMemory] = parsedMemLimit + resources.Limits[corev1api.ResourceMemory] = parsedMemLimit + } + if parsedEphemeralStorageLimit != unbounded { + resources.Limits[corev1api.ResourceEphemeralStorage] = parsedEphemeralStorageLimit } return resources, nil diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/security_context.go b/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/security_context.go index 22c2c4ce8..1fd911649 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/security_context.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/security_context.go @@ -20,12 +20,12 @@ import ( "strconv" "github.com/pkg/errors" - corev1 "k8s.io/api/core/v1" + corev1api "k8s.io/api/core/v1" "sigs.k8s.io/yaml" ) -func ParseSecurityContext(runAsUser string, runAsGroup string, allowPrivilegeEscalation string, secCtx string) (corev1.SecurityContext, error) { - securityContext := corev1.SecurityContext{} +func ParseSecurityContext(runAsUser string, runAsGroup string, allowPrivilegeEscalation string, secCtx string) (corev1api.SecurityContext, error) { + securityContext := corev1api.SecurityContext{} if runAsUser != "" { parsedRunAsUser, err := strconv.ParseInt(runAsUser, 10, 64) diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/utils.go b/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/utils.go index 5d64f117b..5e5e97603 100644 --- a/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/utils.go +++ b/vendor/github.com/vmware-tanzu/velero/pkg/util/kube/utils.go @@ -18,6 +18,7 @@ package kube import ( "context" + "encoding/json" "fmt" "strings" "time" @@ -25,7 +26,6 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" corev1api "k8s.io/api/core/v1" - storagev1api "k8s.io/api/storage/v1" apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -33,6 +33,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -54,6 +55,11 @@ const ( KubeAnnSelectedNode = "volume.kubernetes.io/selected-node" ) +// VolumeSnapshotContentManagedByLabel is applied by the snapshot controller +// to the VolumeSnapshotContent object in case distributed snapshotting is enabled. +// The value contains the name of the node that handles the snapshot for the volume local to that node. +const VolumeSnapshotContentManagedByLabel = "snapshot.storage.kubernetes.io/managed-by" + var ErrorPodVolumeIsNotPVC = errors.New("pod volume is not a PVC") // NamespaceAndName returns a string in the format / @@ -148,8 +154,8 @@ func EnsureNamespaceExistsAndIsReady(namespace *corev1api.Namespace, client core // GetVolumeDirectory gets the name of the directory on the host, under /var/lib/kubelet/pods//volumes/, // where the specified volume lives. 
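`ParseResourceRequirements` above grows from four string arguments to six, with ephemeral storage following the same parse, request-vs-limit comparison, and unbounded-zero rules as CPU and memory; `ParseCPUAndMemoryResources` keeps the old four-argument call shape by filling in the package defaults. A hedged calling sketch (the quantities and the `example` package are invented):

```go
package example

import (
	"fmt"

	"github.com/vmware-tanzu/velero/pkg/util/kube"
)

func resourcesExample() error {
	resources, err := kube.ParseResourceRequirements(
		"500m",  // CPU request
		"128Mi", // memory request
		"512Mi", // ephemeral storage request
		"1",     // CPU limit
		"0",     // memory limit: "0" means unbounded, so no limit is set
		"1Gi",   // ephemeral storage limit
	)
	if err != nil {
		return err
	}
	fmt.Println(resources.Requests, resources.Limits)
	return nil
}
```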
// For volumes with a CSIVolumeSource, append "/mount" to the directory name. -func GetVolumeDirectory(ctx context.Context, log logrus.FieldLogger, pod *corev1api.Pod, volumeName string, cli client.Client) (string, error) { - pvc, pv, volume, err := GetPodPVCVolume(ctx, log, pod, volumeName, cli) +func GetVolumeDirectory(ctx context.Context, log logrus.FieldLogger, pod *corev1api.Pod, volumeName string, kubeClient kubernetes.Interface) (string, error) { + pvc, pv, volume, err := GetPodPVCVolume(ctx, log, pod, volumeName, kubeClient) if err != nil { // This case implies the administrator created the PV and attached it directly, without PVC. // Note that only one VolumeSource can be populated per Volume on a pod @@ -164,7 +170,7 @@ func GetVolumeDirectory(ctx context.Context, log logrus.FieldLogger, pod *corev1 // Most common case is that we have a PVC VolumeSource, and we need to check the PV it points to for a CSI source. // PV's been created with a CSI source. - isProvisionedByCSI, err := isProvisionedByCSI(log, pv, cli) + isProvisionedByCSI, err := isProvisionedByCSI(log, pv, kubeClient) if err != nil { return "", errors.WithStack(err) } @@ -179,9 +185,9 @@ func GetVolumeDirectory(ctx context.Context, log logrus.FieldLogger, pod *corev1 } // GetVolumeMode gets the uploader.PersistentVolumeMode of the volume. -func GetVolumeMode(ctx context.Context, log logrus.FieldLogger, pod *corev1api.Pod, volumeName string, cli client.Client) ( +func GetVolumeMode(ctx context.Context, log logrus.FieldLogger, pod *corev1api.Pod, volumeName string, kubeClient kubernetes.Interface) ( uploader.PersistentVolumeMode, error) { - _, pv, _, err := GetPodPVCVolume(ctx, log, pod, volumeName, cli) + _, pv, _, err := GetPodPVCVolume(ctx, log, pod, volumeName, kubeClient) if err != nil { if err == ErrorPodVolumeIsNotPVC { @@ -198,7 +204,7 @@ func GetVolumeMode(ctx context.Context, log logrus.FieldLogger, pod *corev1api.P // GetPodPVCVolume gets the PVC, PV and volume for a pod volume name. // Returns pod volume in case of ErrorPodVolumeIsNotPVC error -func GetPodPVCVolume(ctx context.Context, log logrus.FieldLogger, pod *corev1api.Pod, volumeName string, cli client.Client) ( +func GetPodPVCVolume(ctx context.Context, log logrus.FieldLogger, pod *corev1api.Pod, volumeName string, kubeClient kubernetes.Interface) ( *corev1api.PersistentVolumeClaim, *corev1api.PersistentVolume, *corev1api.Volume, error) { var volume *corev1api.Volume @@ -217,14 +223,12 @@ func GetPodPVCVolume(ctx context.Context, log logrus.FieldLogger, pod *corev1api return nil, nil, volume, ErrorPodVolumeIsNotPVC // There is a pod volume but it is not a PVC } - pvc := &corev1api.PersistentVolumeClaim{} - err := cli.Get(ctx, client.ObjectKey{Namespace: pod.Namespace, Name: volume.VolumeSource.PersistentVolumeClaim.ClaimName}, pvc) + pvc, err := kubeClient.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(ctx, volume.VolumeSource.PersistentVolumeClaim.ClaimName, metav1.GetOptions{}) if err != nil { return nil, nil, nil, errors.WithStack(err) } - pv := &corev1api.PersistentVolume{} - err = cli.Get(ctx, client.ObjectKey{Name: pvc.Spec.VolumeName}, pv) + pv, err := kubeClient.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{}) if err != nil { return nil, nil, nil, errors.WithStack(err) } @@ -235,7 +239,7 @@ func GetPodPVCVolume(ctx context.Context, log logrus.FieldLogger, pod *corev1api // isProvisionedByCSI function checks whether this is a CSI PV by annotation. 
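This utils.go hunk threads a typed `kubernetes.Interface` through `GetVolumeDirectory`, `GetVolumeMode`, and `GetPodPVCVolume` in place of the controller-runtime `client.Client` they previously took, so PVC and PV lookups now go through the typed CoreV1 getters. A sketch of the new call shape (the wrapper function is illustrative only):

```go
package example

import (
	"context"

	"github.com/sirupsen/logrus"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"

	"github.com/vmware-tanzu/velero/pkg/util/kube"
)

// volumeDirFor resolves the kubelet directory name for one pod volume,
// passing the typed clientset this hunk switches to.
func volumeDirFor(ctx context.Context, kubeClient kubernetes.Interface, pod *corev1.Pod, volumeName string) (string, error) {
	return kube.GetVolumeDirectory(ctx, logrus.New(), pod, volumeName, kubeClient)
}
```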
// Either "pv.kubernetes.io/provisioned-by" or "pv.kubernetes.io/migrated-to" indicates // PV is provisioned by CSI. -func isProvisionedByCSI(log logrus.FieldLogger, pv *corev1api.PersistentVolume, kbClient client.Client) (bool, error) { +func isProvisionedByCSI(log logrus.FieldLogger, pv *corev1api.PersistentVolume, kubeClient kubernetes.Interface) (bool, error) { if pv.Spec.CSI != nil { return true, nil } @@ -245,10 +249,11 @@ func isProvisionedByCSI(log logrus.FieldLogger, pv *corev1api.PersistentVolume, driverName := pv.Annotations[KubeAnnDynamicallyProvisioned] migratedDriver := pv.Annotations[KubeAnnMigratedTo] if len(driverName) > 0 || len(migratedDriver) > 0 { - list := &storagev1api.CSIDriverList{} - if err := kbClient.List(context.TODO(), list); err != nil { + list, err := kubeClient.StorageV1().CSIDrivers().List(context.TODO(), metav1.ListOptions{}) + if err != nil { return false, err } + for _, driver := range list.Items { if driverName == driver.Name || migratedDriver == driver.Name { log.Debugf("the annotation %s or %s equals to %s indicates the volume is provisioned by a CSI driver", KubeAnnDynamicallyProvisioned, KubeAnnMigratedTo, driver.Name) @@ -354,3 +359,29 @@ func HasBackupLabel(o *metav1.ObjectMeta, backupName string) bool { } return o.Labels[velerov1api.BackupNameLabel] == label.GetValidName(backupName) } + +func VerifyJSONConfigs(ctx context.Context, namespace string, crClient client.Client, configName string, configType any) error { + cm := new(corev1api.ConfigMap) + err := crClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: configName}, cm) + if err != nil { + return errors.Wrapf(err, "fail to find ConfigMap %s", configName) + } + + if cm.Data == nil { + return errors.Errorf("data is not available in ConfigMap %s", configName) + } + + // Verify all the keys in ConfigMap's data. + jsonString := "" + for _, v := range cm.Data { + jsonString = v + + configs := configType + err = json.Unmarshal([]byte(jsonString), configs) + if err != nil { + return errors.Wrapf(err, "error to unmarshall data from ConfigMap %s", configName) + } + } + + return nil +} diff --git a/vendor/github.com/vmware-tanzu/velero/pkg/util/wildcard/expand.go b/vendor/github.com/vmware-tanzu/velero/pkg/util/wildcard/expand.go new file mode 100644 index 000000000..8767c8ed3 --- /dev/null +++ b/vendor/github.com/vmware-tanzu/velero/pkg/util/wildcard/expand.go @@ -0,0 +1,182 @@ +package wildcard + +import ( + "errors" + "strings" + + "github.com/gobwas/glob" + "k8s.io/apimachinery/pkg/util/sets" +) + +func ShouldExpandWildcards(includes []string, excludes []string) bool { + wildcardFound := false + for _, include := range includes { + // Special case: "*" alone means "match all" - don't expand + if include == "*" { + return false + } + + if containsWildcardPattern(include) { + wildcardFound = true + } + } + + for _, exclude := range excludes { + if containsWildcardPattern(exclude) { + wildcardFound = true + } + } + + return wildcardFound +} + +// containsWildcardPattern checks if a pattern contains any wildcard symbols +// Supported patterns: *, ?, [abc] +// Note: . 
and + are treated as literal characters (not wildcards) +// Note: ** and consecutive asterisks are NOT supported (will cause validation error) +func containsWildcardPattern(pattern string) bool { + return strings.ContainsAny(pattern, "*?[") +} + +func validateWildcardPatterns(patterns []string) error { + for _, pattern := range patterns { + if err := ValidateNamespaceName(pattern); err != nil { + return err + } + } + return nil +} + +func ValidateNamespaceName(pattern string) error { + // Check for invalid characters that are not supported in glob patterns + if strings.ContainsAny(pattern, "|()!{},") { + return errors.New("wildcard pattern contains unsupported characters: |, (, ), !, {, }, ,") + } + + // Check for consecutive asterisks (2 or more) + if strings.Contains(pattern, "**") { + return errors.New("wildcard pattern contains consecutive asterisks (only single * allowed)") + } + + // Check for malformed brace patterns + if err := validateBracePatterns(pattern); err != nil { + return err + } + + return nil +} + +// validateBracePatterns checks for malformed brace patterns like unclosed braces or empty braces +// Also validates bracket patterns [] for character classes +func validateBracePatterns(pattern string) error { + bracketDepth := 0 + + for i := 0; i < len(pattern); i++ { + if pattern[i] == '[' { + bracketStart := i + bracketDepth++ + + // Scan ahead to find the matching closing bracket and validate content + for j := i + 1; j < len(pattern) && bracketDepth > 0; j++ { + if pattern[j] == ']' { + bracketDepth-- + if bracketDepth == 0 { + // Found matching closing bracket - validate content + content := pattern[bracketStart+1 : j] + if content == "" { + return errors.New("wildcard pattern contains empty bracket pattern '[]'") + } + // Skip to the closing bracket + i = j + break + } + } + } + + // If we exited the loop without finding a match (bracketDepth > 0), bracket is unclosed + if bracketDepth > 0 { + return errors.New("wildcard pattern contains unclosed bracket '['") + } + + // i is now positioned at the closing bracket; the outer loop will increment it + } else if pattern[i] == ']' { + // Found a closing bracket without a matching opening bracket + return errors.New("wildcard pattern contains unmatched closing bracket ']'") + } + } + + return nil +} + +func ExpandWildcards(activeNamespaces []string, includes []string, excludes []string) ([]string, []string, error) { + expandedIncludes, err := expandWildcards(includes, activeNamespaces) + if err != nil { + return nil, nil, err + } + + expandedExcludes, err := expandWildcards(excludes, activeNamespaces) + if err != nil { + return nil, nil, err + } + + return expandedIncludes, expandedExcludes, nil +} + +// expands wildcard patterns into a list of namespaces, while normally passing non-wildcard patterns +func expandWildcards(patterns []string, activeNamespaces []string) ([]string, error) { + if len(patterns) == 0 { + return nil, nil + } + + // Validate patterns before processing + if err := validateWildcardPatterns(patterns); err != nil { + return nil, err + } + + matchedSet := make(map[string]struct{}) + + for _, pattern := range patterns { + // If the pattern is a non-wildcard pattern, we can just add it to the result + if !containsWildcardPattern(pattern) { + matchedSet[pattern] = struct{}{} + continue + } + + // Compile glob pattern + g, err := glob.Compile(pattern) + if err != nil { + return nil, err + } + + // Match against all namespaces + for _, ns := range activeNamespaces { + if g.Match(ns) { + matchedSet[ns] = 
struct{}{} + } + } + } + + // Convert set to slice + result := make([]string, 0, len(matchedSet)) + for ns := range matchedSet { + result = append(result, ns) + } + + return result, nil +} + +// GetWildcardResult returns the final list of namespaces after applying wildcard include/exclude logic +func GetWildcardResult(expandedIncludes []string, expandedExcludes []string) []string { + // Set check: set of expandedIncludes - set of expandedExcludes + expandedIncludesSet := sets.New(expandedIncludes...) + expandedExcludesSet := sets.New(expandedExcludes...) + selectedNamespacesSet := expandedIncludesSet.Difference(expandedExcludesSet) + + // Convert the set to a slice + selectedNamespaces := make([]string, 0, selectedNamespacesSet.Len()) + for ns := range selectedNamespacesSet { + selectedNamespaces = append(selectedNamespaces, ns) + } + + return selectedNamespaces +} diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go deleted file mode 100644 index 24cea6882..000000000 --- a/vendor/golang.org/x/net/context/context.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package context has been superseded by the standard library [context] package. -// -// Deprecated: Use the standard library context package instead. -package context - -import ( - "context" // standard library's context, as of Go 1.7 - "time" -) - -// A Context carries a deadline, a cancellation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -// -//go:fix inline -type Context = context.Context - -// Canceled is the error returned by [Context.Err] when the context is canceled -// for some reason other than its deadline passing. -// -//go:fix inline -var Canceled = context.Canceled - -// DeadlineExceeded is the error returned by [Context.Err] when the context is canceled -// due to its deadline passing. -// -//go:fix inline -var DeadlineExceeded = context.DeadlineExceeded - -// Background returns a non-nil, empty Context. It is never canceled, has no -// values, and has no deadline. It is typically used by the main function, -// initialization, and tests, and as the top-level Context for incoming -// requests. -// -//go:fix inline -func Background() Context { return context.Background() } - -// TODO returns a non-nil, empty Context. Code should use context.TODO when -// it's unclear which Context to use or it is not yet available (because the -// surrounding function has not yet been extended to accept a Context -// parameter). -// -//go:fix inline -func TODO() Context { return context.TODO() } - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// A CancelFunc may be called by multiple goroutines simultaneously. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc = context.CancelFunc - -// WithCancel returns a derived context that points to the parent context -// but has a new Done channel. The returned context's Done channel is closed -// when the returned cancel function is called or when the parent context's -// Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this [Context] complete. 
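Returning to the new `wildcard` package above: `ExpandWildcards` resolves glob patterns against the active namespace list, passing non-wildcard names through untouched, and `GetWildcardResult` takes the set difference of the expanded includes and excludes. A sketch under assumed inputs (the namespace names and the `example` package are invented):

```go
package example

import (
	"fmt"

	"github.com/vmware-tanzu/velero/pkg/util/wildcard"
)

func wildcardExample() error {
	active := []string{"app-1", "app-2", "app-staging", "kube-system"}

	// "app-*" expands against the active namespaces; the exclude list
	// holds a plain name, which is passed through as-is.
	includes, excludes, err := wildcard.ExpandWildcards(
		active, []string{"app-*"}, []string{"app-staging"})
	if err != nil {
		return err
	}

	// Set difference: app-1 and app-2 (order not guaranteed).
	fmt.Println(wildcard.GetWildcardResult(includes, excludes))
	return nil
}
```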
-// -//go:fix inline -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - return context.WithCancel(parent) -} - -// WithDeadline returns a derived context that points to the parent context -// but has the deadline adjusted to be no later than d. If the parent's -// deadline is already earlier than d, WithDeadline(parent, d) is semantically -// equivalent to parent. The returned [Context.Done] channel is closed when -// the deadline expires, when the returned cancel function is called, -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this [Context] complete. -// -//go:fix inline -func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) { - return context.WithDeadline(parent, d) -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this [Context] complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -// -//go:fix inline -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return context.WithTimeout(parent, timeout) -} - -// WithValue returns a derived context that points to the parent Context. -// In the derived context, the value associated with key is val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -// -// The provided key must be comparable and should not be of type -// string or any other built-in type to avoid collisions between -// packages using context. Users of WithValue should define their own -// types for keys. To avoid allocating when assigning to an -// interface{}, context keys often have concrete type -// struct{}. Alternatively, exported context key variables' static -// type should be a pointer or interface. -// -//go:fix inline -func WithValue(parent Context, key, val interface{}) Context { - return context.WithValue(parent, key, val) -} diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index d9bfa6e1e..2079de7b0 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -1,73 +1,159 @@ # How to contribute -We definitely welcome your patches and contributions to gRPC! Please read the gRPC -organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md) -and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding. +We welcome your patches and contributions to gRPC! Please read the gRPC +organization's [governance +rules](https://github.com/grpc/grpc-community/blob/master/governance.md) before +proceeding. If you are new to GitHub, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) ## Legal requirements In order to protect both you and ourselves, you will need to sign the -[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf). 
+[Contributor License +Agreement](https://identity.linuxfoundation.org/projects/cncf). When you create +your first PR, a link will be added as a comment that contains the steps needed +to complete this process. + +## Getting Started + +A great way to start is by searching through our open issues. [Unassigned issues +labeled as "help +wanted"](https://github.com/grpc/grpc-go/issues?q=sort%3Aupdated-desc%20is%3Aissue%20is%3Aopen%20label%3A%22Status%3A%20Help%20Wanted%22%20no%3Aassignee) +are especially nice for first-time contributors, as they should be well-defined +problems that already have agreed-upon solutions. + +## Code Style + +We follow [Google's published Go style +guide](https://google.github.io/styleguide/go/). Note that there are three +primary documents that make up this style guide; please follow them as closely +as possible. If a reviewer recommends something that contradicts those +guidelines, there may be valid reasons to do so, but it should be rare. ## Guidelines for Pull Requests -How to get your contributions merged smoothly and quickly. + +Please read the following carefully to ensure your contributions can be merged +smoothly and quickly. + +### PR Contents - Create **small PRs** that are narrowly focused on **addressing a single - concern**. We often times receive PRs that are trying to fix several things at - a time, but only one fix is considered acceptable, nothing gets merged and - both author's & review's time is wasted. Create more PRs to address different - concerns and everyone will be happy. + concern**. We often receive PRs that attempt to fix several things at the same + time, and if one part of the PR has a problem, that will hold up the entire + PR. + +- If your change does not address an **open issue** with an **agreed + resolution**, consider opening an issue and discussing it first. If you are + suggesting a behavioral or API change, consider starting with a [gRFC + proposal](https://github.com/grpc/proposal). Many new features that are not + bug fixes will require cross-language agreement. + +- If you want to fix **formatting or style**, consider whether your changes are + an obvious improvement or might be considered a personal preference. If a + style change is based on preference, it likely will not be accepted. If it + corrects widely agreed-upon anti-patterns, then please do create a PR and + explain the benefits of the change. + +- For correcting **misspellings**, please be aware that we use some terms that + are sometimes flagged by spell checkers. As an example, "if an only if" is + often written as "iff". Please do not make spelling correction changes unless + you are certain they are misspellings. + +- **All tests need to be passing** before your change can be merged. We + recommend you run tests locally before creating your PR to catch breakages + early on: -- If you are searching for features to work on, issues labeled [Status: Help - Wanted](https://github.com/grpc/grpc-go/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22Status%3A+Help+Wanted%22) - is a great place to start. These issues are well-documented and usually can be - resolved with a single pull request. + - `./scripts/vet.sh` to catch vet errors. + - `go test -cpu 1,4 -timeout 7m ./...` to run the tests. + - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode. -- If you are adding a new file, make sure it has the copyright message template - at the top as a comment. You can copy over the message from an existing file - and update the year. 
+ Note that we have a multi-module repo, so `go test` commands may need to be + run from the root of each module in order to cause all tests to run. + + *Alternatively*, you may find it easier to push your changes to your fork on + GitHub, which will trigger a GitHub Actions run that you can use to verify + everything is passing. + +- Note that there are two GitHub actions checks that need not be green: + + 1. We test the freshness of the generated proto code we maintain via the + `vet-proto` check. If the source proto files are updated, but our repo is + not updated, an optional checker will fail. This will be fixed by our team + in a separate PR and will not prevent the merge of your PR. + + 2. We run a checker that will fail if there is any change in dependencies of + an exported package via the `dependencies` check. If new dependencies are + added that are not appropriate, we may not accept your PR (see below). + +- If you are adding a **new file**, make sure it has the **copyright message** + template at the top as a comment. You can copy the message from an existing + file and update the year. - The grpc package should only depend on standard Go packages and a small number - of exceptions. If your contribution introduces new dependencies which are NOT - in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a - discussion with gRPC-Go authors and consultants. + of exceptions. **If your contribution introduces new dependencies**, you will + need a discussion with gRPC-Go maintainers. -- For speculative changes, consider opening an issue and discussing it first. If - you are suggesting a behavioral or API change, consider starting with a [gRFC - proposal](https://github.com/grpc/proposal). +### PR Descriptions -- Provide a good **PR description** as a record of **what** change is being made - and **why** it was made. Link to a GitHub issue if it exists. +- **PR titles** should start with the name of the component being addressed, or + the type of change. Examples: transport, client, server, round_robin, xds, + cleanup, deps. -- If you want to fix formatting or style, consider whether your changes are an - obvious improvement or might be considered a personal preference. If a style - change is based on preference, it likely will not be accepted. If it corrects - widely agreed-upon anti-patterns, then please do create a PR and explain the - benefits of the change. +- Read and follow the **guidelines for PR titles and descriptions** here: + https://google.github.io/eng-practices/review/developer/cl-descriptions.html -- Unless your PR is trivial, you should expect there will be reviewer comments - that you'll need to address before merging. We'll mark it as `Status: Requires - Reporter Clarification` if we expect you to respond to these comments in a - timely manner. If the PR remains inactive for 6 days, it will be marked as - `stale` and automatically close 7 days after that if we don't hear back from - you. + *particularly* the sections "First Line" and "Body is Informative". -- Maintain **clean commit history** and use **meaningful commit messages**. PRs - with messy commit history are difficult to review and won't be merged. Use - `rebase -i upstream/master` to curate your commit history and/or to bring in - latest changes from master (but avoid rebasing in the middle of a code - review). + Note: your PR description will be used as the git commit message in a + squash-and-merge if your PR is approved. We may make changes to this as + necessary. 
-- Keep your PR up to date with upstream/master (if there are merge conflicts, we - can't really merge your change). +- **Does this PR relate to an open issue?** On the first line, please use the + tag `Fixes #` to ensure the issue is closed when the PR is merged. Or + use `Updates #` if the PR is related to an open issue, but does not fix + it. Consider filing an issue if one does not already exist. -- **All tests need to be passing** before your change can be merged. We - recommend you **run tests locally** before creating your PR to catch breakages - early on. - - `./scripts/vet.sh` to catch vet errors - - `go test -cpu 1,4 -timeout 7m ./...` to run the tests - - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode +- PR descriptions *must* conclude with **release notes** as follows: + + ``` + RELEASE NOTES: + * : + ``` + + This need not match the PR title. + + The summary must: + + * be something that gRPC users will understand. + + * clearly explain the feature being added, the issue being fixed, or the + behavior being changed, etc. If fixing a bug, be clear about how the bug + can be triggered by an end-user. + + * begin with a capital letter and use complete sentences. -- Exceptions to the rules can be made if there's a compelling reason for doing so. + * be as short as possible to describe the change being made. + + If a PR is *not* end-user visible -- e.g. a cleanup, testing change, or + GitHub-related, use `RELEASE NOTES: n/a`. + +### PR Process + +- Please **self-review** your code changes before sending your PR. This will + prevent simple, obvious errors from causing delays. + +- Maintain a **clean commit history** and use **meaningful commit messages**. + PRs with messy commit histories are difficult to review and won't be merged. + Before sending your PR, ensure your changes are based on top of the latest + `upstream/master` commits, and avoid rebasing in the middle of a code review. + You should **never use `git push -f`** unless absolutely necessary during a + review, as it can interfere with GitHub's tracking of comments. + +- Unless your PR is trivial, you should **expect reviewer comments** that you + will need to address before merging. We'll label the PR as `Status: Requires + Reporter Clarification` if we expect you to respond to these comments in a + timely manner. If the PR remains inactive for 6 days, it will be marked as + `stale`, and we will automatically close it after 7 days if we don't hear back + from you. Please feel free to ping issues or bugs if you do not get a response + within a week. diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md index 5d4096d46..df35bb9a8 100644 --- a/vendor/google.golang.org/grpc/MAINTAINERS.md +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -9,21 +9,19 @@ for general contribution guidelines. ## Maintainers (in alphabetical order) -- [aranjans](https://github.com/aranjans), Google LLC - [arjan-bal](https://github.com/arjan-bal), Google LLC - [arvindbr8](https://github.com/arvindbr8), Google LLC - [atollena](https://github.com/atollena), Datadog, Inc. 
- [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC -- [erm-g](https://github.com/erm-g), Google LLC - [gtcooke94](https://github.com/gtcooke94), Google LLC -- [purnesh42h](https://github.com/purnesh42h), Google LLC -- [zasweq](https://github.com/zasweq), Google LLC ## Emeritus Maintainers (in alphabetical order) - [adelez](https://github.com/adelez) +- [aranjans](https://github.com/aranjans) - [canguler](https://github.com/canguler) - [cesarghali](https://github.com/cesarghali) +- [erm-g](https://github.com/erm-g) - [iamqizhao](https://github.com/iamqizhao) - [jeanbza](https://github.com/jeanbza) - [jtattermusch](https://github.com/jtattermusch) @@ -32,5 +30,7 @@ for general contribution guidelines. - [matt-kwong](https://github.com/matt-kwong) - [menghanl](https://github.com/menghanl) - [nicolasnoble](https://github.com/nicolasnoble) +- [purnesh42h](https://github.com/purnesh42h) - [srini100](https://github.com/srini100) - [yongni](https://github.com/yongni) +- [zasweq](https://github.com/zasweq) diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index b572707c6..f9a88d597 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -32,6 +32,7 @@ import "google.golang.org/grpc" - [Low-level technical docs](Documentation) from this repository - [Performance benchmark][] - [Examples](examples) +- [Contribution guidelines](CONTRIBUTING.md) ## FAQ diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index c9b343c71..b1264017d 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -360,6 +360,10 @@ type Balancer interface { // call SubConn.Shutdown for its existing SubConns; however, this will be // required in a future release, so it is recommended. Close() + // ExitIdle instructs the LB policy to reconnect to backends / exit the + // IDLE state, if appropriate and possible. Note that SubConns that enter + // the IDLE state will not reconnect until SubConn.Connect is called. + ExitIdle() } // ExitIdler is an optional interface for balancers to implement. If @@ -367,8 +371,8 @@ type Balancer interface { // the ClientConn is idle. If unimplemented, ClientConn.Connect will cause // all SubConns to connect. // -// Notice: it will be required for all balancers to implement this in a future -// release. +// Deprecated: All balancers must implement this interface. This interface will +// be removed in a future release. type ExitIdler interface { // ExitIdle instructs the LB policy to reconnect to backends / exit the // IDLE state, if appropriate and possible. Note that SubConns that enter diff --git a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go index cc606f4da..360db08eb 100644 --- a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go +++ b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go @@ -37,6 +37,8 @@ import ( "google.golang.org/grpc/resolver" ) +var randIntN = rand.IntN + // ChildState is the balancer state of a child along with the endpoint which // identifies the child balancer. type ChildState struct { @@ -45,7 +47,15 @@ type ChildState struct { // Balancer exposes only the ExitIdler interface of the child LB policy. 
// Other methods of the child policy are called only by endpointsharding. - Balancer balancer.ExitIdler + Balancer ExitIdler +} + +// ExitIdler provides access to only the ExitIdle method of the child balancer. +type ExitIdler interface { + // ExitIdle instructs the LB policy to reconnect to backends / exit the + // IDLE state, if appropriate and possible. Note that SubConns that enter + // the IDLE state will not reconnect until SubConn.Connect is called. + ExitIdle() } // Options are the options to configure the behaviour of the @@ -104,6 +114,21 @@ type endpointSharding struct { mu sync.Mutex } +// rotateEndpoints returns a slice of all the input endpoints rotated a random +// amount. +func rotateEndpoints(es []resolver.Endpoint) []resolver.Endpoint { + les := len(es) + if les == 0 { + return es + } + r := randIntN(les) + // Make a copy to avoid mutating data beyond the end of es. + ret := make([]resolver.Endpoint, les) + copy(ret, es[r:]) + copy(ret[les-r:], es[:r]) + return ret +} + // UpdateClientConnState creates a child for new endpoints and deletes children // for endpoints that are no longer present. It also updates all the children, // and sends a single synchronous update of the childrens' aggregated state at @@ -125,7 +150,7 @@ func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState newChildren := resolver.NewEndpointMap[*balancerWrapper]() // Update/Create new children. - for _, endpoint := range state.ResolverState.Endpoints { + for _, endpoint := range rotateEndpoints(state.ResolverState.Endpoints) { if _, ok := newChildren.Get(endpoint); ok { // Endpoint child was already created, continue to avoid duplicate // update. @@ -205,6 +230,16 @@ func (es *endpointSharding) Close() { } } +func (es *endpointSharding) ExitIdle() { + es.childMu.Lock() + defer es.childMu.Unlock() + for _, bw := range es.children.Load().Values() { + if !bw.isClosed { + bw.child.ExitIdle() + } + } +} + // updateState updates this component's state. It sends the aggregated state, // and a picker with round robin behavior with all the child states present if // needed. @@ -261,7 +296,7 @@ func (es *endpointSharding) updateState() { p := &pickerWithChildStates{ pickers: pickers, childStates: childStates, - next: uint32(rand.IntN(len(pickers))), + next: uint32(randIntN(len(pickers))), } es.cc.UpdateState(balancer.State{ ConnectivityState: aggState, @@ -326,15 +361,13 @@ func (bw *balancerWrapper) UpdateState(state balancer.State) { // ExitIdle pings an IDLE child balancer to exit idle in a new goroutine to // avoid deadlocks due to synchronous balancer state updates. func (bw *balancerWrapper) ExitIdle() { - if ei, ok := bw.child.(balancer.ExitIdler); ok { - go func() { - bw.es.childMu.Lock() - if !bw.isClosed { - ei.ExitIdle() - } - bw.es.childMu.Unlock() - }() - } + go func() { + bw.es.childMu.Lock() + if !bw.isClosed { + bw.child.ExitIdle() + } + bw.es.childMu.Unlock() + }() } // updateClientConnStateLocked delivers the ClientConnState to the child diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index ea8899818..b4bc3a2bf 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -16,55 +16,124 @@ * */ -// Package pickfirst contains the pick_first load balancing policy. +// Package pickfirst contains the pick_first load balancing policy which +// is the universal leaf policy. 
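The `rotateEndpoints` helper added to endpointsharding above rotates the endpoint list by a random offset, so the child creation order (and therefore the initial connection order) varies across channels while each endpoint's relative order is preserved. The same copy-based rotation in isolation, as a generic sketch rather than the vendored code:

```go
package example

import rand "math/rand/v2"

// rotate returns s rotated left by a random offset, preserving relative
// order; an empty slice is returned unchanged.
func rotate[T any](s []T) []T {
	n := len(s)
	if n == 0 {
		return s
	}
	r := rand.IntN(n)
	out := make([]T, n)
	copy(out, s[r:])
	copy(out[n-r:], s[:r])
	return out
}
```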
package pickfirst import ( "encoding/json" "errors" "fmt" - rand "math/rand/v2" + "net" + "net/netip" + "sync" + "time" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/pickfirst/internal" "google.golang.org/grpc/connectivity" + expstats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/envconfig" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" - - _ "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" // For automatically registering the new pickfirst if required. ) func init() { - if envconfig.NewPickFirstEnabled { - return - } balancer.Register(pickfirstBuilder{}) } -var logger = grpclog.Component("pick-first-lb") +// Name is the name of the pick_first balancer. +const Name = "pick_first" + +// enableHealthListenerKeyType is a unique key type used in resolver +// attributes to indicate whether the health listener usage is enabled. +type enableHealthListenerKeyType struct{} + +var ( + logger = grpclog.Component("pick-first-leaf-lb") + disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.disconnections", + Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.", + Unit: "{disconnection}", + Labels: []string{"grpc.target"}, + Default: false, + }) + connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.connection_attempts_succeeded", + Description: "EXPERIMENTAL. Number of successful connection attempts.", + Unit: "{attempt}", + Labels: []string{"grpc.target"}, + Default: false, + }) + connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.connection_attempts_failed", + Description: "EXPERIMENTAL. Number of failed connection attempts.", + Unit: "{attempt}", + Labels: []string{"grpc.target"}, + Default: false, + }) +) const ( - // Name is the name of the pick_first balancer. - Name = "pick_first" - logPrefix = "[pick-first-lb %p] " + // TODO: change to pick-first when this becomes the default pick_first policy. + logPrefix = "[pick-first-leaf-lb %p] " + // connectionDelayInterval is the time to wait for during the happy eyeballs + // pass before starting the next connection attempt. + connectionDelayInterval = 250 * time.Millisecond +) + +type ipAddrFamily int + +const ( + // ipAddrFamilyUnknown represents strings that can't be parsed as an IP + // address. 
+ ipAddrFamilyUnknown ipAddrFamily = iota + ipAddrFamilyV4 + ipAddrFamilyV6 ) type pickfirstBuilder struct{} -func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { - b := &pickfirstBalancer{cc: cc} +func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer { + b := &pickfirstBalancer{ + cc: cc, + target: bo.Target.String(), + metricsRecorder: cc.MetricsRecorder(), + + subConns: resolver.NewAddressMapV2[*scData](), + state: connectivity.Connecting, + cancelConnectionTimer: func() {}, + } b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) return b } -func (pickfirstBuilder) Name() string { +func (b pickfirstBuilder) Name() string { return Name } +func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var cfg pfConfig + if err := json.Unmarshal(js, &cfg); err != nil { + return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + return cfg, nil +} + +// EnableHealthListener updates the state to configure pickfirst for using a +// generic health listener. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. +func EnableHealthListener(state resolver.State) resolver.State { + state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true) + return state +} + type pfConfig struct { serviceconfig.LoadBalancingConfig `json:"-"` @@ -74,90 +143,129 @@ type pfConfig struct { ShuffleAddressList bool `json:"shuffleAddressList"` } -func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - var cfg pfConfig - if err := json.Unmarshal(js, &cfg); err != nil { - return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) +// scData keeps track of the current state of the subConn. +// It is not safe for concurrent access. +type scData struct { + // The following fields are initialized at build time and read-only after + // that. + subConn balancer.SubConn + addr resolver.Address + + rawConnectivityState connectivity.State + // The effective connectivity state based on raw connectivity, health state + // and after following sticky TransientFailure behaviour defined in A62. + effectiveState connectivity.State + lastErr error + connectionFailedInFirstPass bool +} + +func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { + sd := &scData{ + rawConnectivityState: connectivity.Idle, + effectiveState: connectivity.Idle, + addr: addr, } - return cfg, nil + sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{ + StateListener: func(state balancer.SubConnState) { + b.updateSubConnState(sd, state) + }, + }) + if err != nil { + return nil, err + } + sd.subConn = sc + return sd, nil } type pickfirstBalancer struct { - logger *internalgrpclog.PrefixLogger - state connectivity.State - cc balancer.ClientConn - subConn balancer.SubConn + // The following fields are initialized at build time and read-only after + // that and therefore do not need to be guarded by a mutex. + logger *internalgrpclog.PrefixLogger + cc balancer.ClientConn + target string + metricsRecorder expstats.MetricsRecorder // guaranteed to be non nil + + // The mutex is used to ensure synchronization of updates triggered + // from the idle picker and the already serialized resolver, + // SubConn state updates. 
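Of the exported additions above, `EnableHealthListener` is the experimental opt-in by which a producer of resolver state asks pick_first to drive a generic health listener on its selected SubConn. A minimal sketch of the opt-in (the wrapper name and `example` package are illustrative):

```go
package example

import (
	"google.golang.org/grpc/balancer/pickfirst"
	"google.golang.org/grpc/resolver"
)

// withHealthListener marks resolver state so pick_first attaches a generic
// health listener (experimental API; may change in a later release).
func withHealthListener(addrs []resolver.Address) resolver.State {
	return pickfirst.EnableHealthListener(resolver.State{Addresses: addrs})
}
```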
+ mu sync.Mutex + // State reported to the channel based on SubConn states and resolver + // updates. + state connectivity.State + // scData for active subonns mapped by address. + subConns *resolver.AddressMapV2[*scData] + addressList addressList + firstPass bool + numTF int + cancelConnectionTimer func() + healthCheckingEnabled bool } +// ResolverError is called by the ClientConn when the name resolver produces +// an error or when pickfirst determined the resolver update to be invalid. func (b *pickfirstBalancer) ResolverError(err error) { + b.mu.Lock() + defer b.mu.Unlock() + b.resolverErrorLocked(err) +} + +func (b *pickfirstBalancer) resolverErrorLocked(err error) { if b.logger.V(2) { b.logger.Infof("Received error from the name resolver: %v", err) } - if b.subConn == nil { - b.state = connectivity.TransientFailure - } - if b.state != connectivity.TransientFailure { - // The picker will not change since the balancer does not currently - // report an error. + // The picker will not change since the balancer does not currently + // report an error. If the balancer hasn't received a single good resolver + // update yet, transition to TRANSIENT_FAILURE. + if b.state != connectivity.TransientFailure && b.addressList.size() > 0 { + if b.logger.V(2) { + b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.") + } return } - b.cc.UpdateState(balancer.State{ + + b.updateBalancerState(balancer.State{ ConnectivityState: connectivity.TransientFailure, Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, }) } -// Shuffler is an interface for shuffling an address list. -type Shuffler interface { - ShuffleAddressListForTesting(n int, swap func(i, j int)) -} - -// ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n -// is the number of elements. swap swaps the elements with indexes i and j. -func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } - func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + b.mu.Lock() + defer b.mu.Unlock() + b.cancelConnectionTimer() if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { - // The resolver reported an empty address list. Treat it like an error by - // calling b.ResolverError. - if b.subConn != nil { - // Shut down the old subConn. All addresses were removed, so it is - // no longer valid. - b.subConn.Shutdown() - b.subConn = nil - } - b.ResolverError(errors.New("produced zero addresses")) + // Cleanup state pertaining to the previous resolver state. + // Treat an empty address list like an error by calling b.ResolverError. + b.closeSubConnsLocked() + b.addressList.updateAddrs(nil) + b.resolverErrorLocked(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } - // We don't have to guard this block with the env var because ParseConfig - // already does so. 
+ b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil cfg, ok := state.BalancerConfig.(pfConfig) if state.BalancerConfig != nil && !ok { - return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) + return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState) } if b.logger.V(2) { b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) } - var addrs []resolver.Address + var newAddrs []resolver.Address if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 { - // Perform the optional shuffling described in gRFC A62. The shuffling will - // change the order of endpoints but not touch the order of the addresses - // within each endpoint. - A61 + // Perform the optional shuffling described in gRFC A62. The shuffling + // will change the order of endpoints but not touch the order of the + // addresses within each endpoint. - A61 if cfg.ShuffleAddressList { endpoints = append([]resolver.Endpoint{}, endpoints...) internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) } - // "Flatten the list by concatenating the ordered list of addresses for each - // of the endpoints, in order." - A61 + // "Flatten the list by concatenating the ordered list of addresses for + // each of the endpoints, in order." - A61 for _, endpoint := range endpoints { - // "In the flattened list, interleave addresses from the two address - // families, as per RFC-8304 section 4." - A61 - // TODO: support the above language. - addrs = append(addrs, endpoint.Addresses...) + newAddrs = append(newAddrs, endpoint.Addresses...) } } else { // Endpoints not set, process addresses until we migrate resolver @@ -166,42 +274,53 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // target do not forward the corresponding correct endpoints down/split // endpoints properly. Once all balancers correctly forward endpoints // down, can delete this else conditional. - addrs = state.ResolverState.Addresses + newAddrs = state.ResolverState.Addresses if cfg.ShuffleAddressList { - addrs = append([]resolver.Address{}, addrs...) - rand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + newAddrs = append([]resolver.Address{}, newAddrs...) + internal.RandShuffle(len(newAddrs), func(i, j int) { newAddrs[i], newAddrs[j] = newAddrs[j], newAddrs[i] }) } } - if b.subConn != nil { - b.cc.UpdateAddresses(b.subConn, addrs) + // If an address appears in multiple endpoints or in the same endpoint + // multiple times, we keep it only once. We will create only one SubConn + // for the address because an AddressMap is used to store SubConns. + // Not de-duplicating would result in attempting to connect to the same + // SubConn multiple times in the same pass. We don't want this. + newAddrs = deDupAddresses(newAddrs) + newAddrs = interleaveAddresses(newAddrs) + + prevAddr := b.addressList.currentAddress() + prevSCData, found := b.subConns.Get(prevAddr) + prevAddrsCount := b.addressList.size() + isPrevRawConnectivityStateReady := found && prevSCData.rawConnectivityState == connectivity.Ready + b.addressList.updateAddrs(newAddrs) + + // If the previous ready SubConn exists in new address list, + // keep this connection and don't create new SubConns. 
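The healthCheckingEnabled assignment above reads a resolver attribute keyed by the unexported enableHealthListenerKeyType, the same key that EnableHealthListener sets; an unexported key type guarantees that no other package can collide with the entry. A rough sketch of that set/read pattern, assuming the vendored google.golang.org/grpc/resolver API shown in this diff and a hypothetical local key type:

    package main

    import (
    	"fmt"

    	"google.golang.org/grpc/resolver"
    )

    // healthListenerKey is hypothetical; like enableHealthListenerKeyType
    // above, being unexported means only this package can set or read it.
    type healthListenerKey struct{}

    func main() {
    	var state resolver.State

    	// Setter, analogous to EnableHealthListener in the diff.
    	state.Attributes = state.Attributes.WithValue(healthListenerKey{}, true)

    	// Reader, analogous to the healthCheckingEnabled line above.
    	enabled := state.Attributes.Value(healthListenerKey{}) != nil
    	fmt.Println("health listener enabled:", enabled) // true
    }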
+ if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) { return nil } - var subConn balancer.SubConn - subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{ - StateListener: func(state balancer.SubConnState) { - b.updateSubConnState(subConn, state) - }, - }) - if err != nil { - if b.logger.V(2) { - b.logger.Infof("Failed to create new SubConn: %v", err) - } - b.state = connectivity.TransientFailure - b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + b.reconcileSubConnsLocked(newAddrs) + // If it's the first resolver update or the balancer was already READY + // (but the new address list does not contain the ready SubConn) or + // CONNECTING, enter CONNECTING. + // We may be in TRANSIENT_FAILURE due to a previous empty address list, + // we should still enter CONNECTING because the sticky TF behaviour + // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported + // due to connectivity failures. + if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 { + // Start connection attempt at first address. + b.forceUpdateConcludedStateLocked(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) - return balancer.ErrBadResolverState + b.startFirstPassLocked() + } else if b.state == connectivity.TransientFailure { + // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until + // we're READY. See A62. + b.startFirstPassLocked() } - b.subConn = subConn - b.state = connectivity.Idle - b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, - }) - b.subConn.Connect() return nil } @@ -211,63 +330,484 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) } -func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { - if b.logger.V(2) { - b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state) +func (b *pickfirstBalancer) Close() { + b.mu.Lock() + defer b.mu.Unlock() + b.closeSubConnsLocked() + b.cancelConnectionTimer() + b.state = connectivity.Shutdown +} + +// ExitIdle moves the balancer out of idle state. It can be called concurrently +// by the idlePicker and clientConn so access to variables should be +// synchronized. +func (b *pickfirstBalancer) ExitIdle() { + b.mu.Lock() + defer b.mu.Unlock() + if b.state == connectivity.Idle { + // Move the balancer into CONNECTING state immediately. This is done to + // avoid staying in IDLE if a resolver update arrives before the first + // SubConn reports CONNECTING. + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + b.startFirstPassLocked() + } +} + +func (b *pickfirstBalancer) startFirstPassLocked() { + b.firstPass = true + b.numTF = 0 + // Reset the connection attempt record for existing SubConns. 
+ for _, sd := range b.subConns.Values() { + sd.connectionFailedInFirstPass = false + } + b.requestConnectionLocked() +} + +func (b *pickfirstBalancer) closeSubConnsLocked() { + for _, sd := range b.subConns.Values() { + sd.subConn.Shutdown() + } + b.subConns = resolver.NewAddressMapV2[*scData]() +} + +// deDupAddresses ensures that each address appears only once in the slice. +func deDupAddresses(addrs []resolver.Address) []resolver.Address { + seenAddrs := resolver.NewAddressMapV2[bool]() + retAddrs := []resolver.Address{} + + for _, addr := range addrs { + if _, ok := seenAddrs.Get(addr); ok { + continue + } + seenAddrs.Set(addr, true) + retAddrs = append(retAddrs, addr) + } + return retAddrs +} + +// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6) +// as per RFC-8305 section 4. +// Whichever address family is first in the list is followed by an address of +// the other address family; that is, if the first address in the list is IPv6, +// then the first IPv4 address should be moved up in the list to be second in +// the list. It doesn't support configuring "First Address Family Count", i.e. +// there will always be a single member of the first address family at the +// beginning of the interleaved list. +// Addresses that are neither IPv4 nor IPv6 are treated as part of a third +// "unknown" family for interleaving. +// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6 +func interleaveAddresses(addrs []resolver.Address) []resolver.Address { + familyAddrsMap := map[ipAddrFamily][]resolver.Address{} + interleavingOrder := []ipAddrFamily{} + for _, addr := range addrs { + family := addressFamily(addr.Addr) + if _, found := familyAddrsMap[family]; !found { + interleavingOrder = append(interleavingOrder, family) + } + familyAddrsMap[family] = append(familyAddrsMap[family], addr) + } + + interleavedAddrs := make([]resolver.Address, 0, len(addrs)) + + for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) { + // Some IP types may have fewer addresses than others, so we look for + // the next type that has a remaining member to add to the interleaved + // list. + family := interleavingOrder[curFamilyIdx] + remainingMembers := familyAddrsMap[family] + if len(remainingMembers) > 0 { + interleavedAddrs = append(interleavedAddrs, remainingMembers[0]) + familyAddrsMap[family] = remainingMembers[1:] + } + } + + return interleavedAddrs +} + +// addressFamily returns the ipAddrFamily after parsing the address string. +// If the address isn't of the format "ip-address:port", it returns +// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when +// using a resolver like passthrough where the address may be a hostname in +// some format that the dialer can resolve. +func addressFamily(address string) ipAddrFamily { + // Parse the IP after removing the port. + host, _, err := net.SplitHostPort(address) + if err != nil { + return ipAddrFamilyUnknown + } + ip, err := netip.ParseAddr(host) + if err != nil { + return ipAddrFamilyUnknown + } + switch { + case ip.Is4() || ip.Is4In6(): + return ipAddrFamilyV4 + case ip.Is6(): + return ipAddrFamilyV6 + default: + return ipAddrFamilyUnknown + } +} + +// reconcileSubConnsLocked updates the active subchannels based on a new address +// list from the resolver. It does this by: +// - closing subchannels: any existing subchannels associated with addresses +// that are no longer in the updated list are shut down. 
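To make the RFC 8305 ordering implemented by interleaveAddresses above concrete, here is a toy, dependency-free re-implementation over plain strings; the caller-supplied classifier stands in for addressFamily, and nothing here is the vendored code itself:

    package main

    import "fmt"

    // interleave mirrors the family-rotation loop of interleaveAddresses above.
    func interleave(addrs []string, familyOf func(string) string) []string {
    	byFamily := map[string][]string{}
    	order := []string{}
    	for _, a := range addrs {
    		f := familyOf(a)
    		if _, ok := byFamily[f]; !ok {
    			order = append(order, f)
    		}
    		byFamily[f] = append(byFamily[f], a)
    	}
    	out := make([]string, 0, len(addrs))
    	for i := 0; len(out) < len(addrs); i = (i + 1) % len(order) {
    		// Skip families that have run out of members, exactly like the
    		// remainingMembers check in the vendored loop.
    		f := order[i]
    		if rest := byFamily[f]; len(rest) > 0 {
    			out = append(out, rest[0])
    			byFamily[f] = rest[1:]
    		}
    	}
    	return out
    }

    func main() {
    	// Pretend the first character names the family ("6" = IPv6, "4" = IPv4).
    	familyOf := func(a string) string { return a[:1] }
    	fmt.Println(interleave([]string{"6a", "6b", "6c", "4a", "4b"}, familyOf))
    	// Output: [6a 4a 6b 4b 6c], i.e. the first family leads, then the
    	// families alternate until each is exhausted.
    }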
+// - removing subchannels: entries for these closed subchannels are removed +// from the subchannel map. +// +// This ensures that the subchannel map accurately reflects the current set of +// addresses received from the name resolver. +func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) { + newAddrsMap := resolver.NewAddressMapV2[bool]() + for _, addr := range newAddrs { + newAddrsMap.Set(addr, true) + } + + for _, oldAddr := range b.subConns.Keys() { + if _, ok := newAddrsMap.Get(oldAddr); ok { + continue + } + val, _ := b.subConns.Get(oldAddr) + val.subConn.Shutdown() + b.subConns.Delete(oldAddr) + } +} + +// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn +// becomes ready, which means that all other subConn must be shutdown. +func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) { + b.cancelConnectionTimer() + for _, sd := range b.subConns.Values() { + if sd.subConn != selected.subConn { + sd.subConn.Shutdown() + } + } + b.subConns = resolver.NewAddressMapV2[*scData]() + b.subConns.Set(selected.addr, selected) +} + +// requestConnectionLocked starts connecting on the subchannel corresponding to +// the current address. If no subchannel exists, one is created. If the current +// subchannel is in TransientFailure, a connection to the next address is +// attempted until a subchannel is found. +func (b *pickfirstBalancer) requestConnectionLocked() { + if !b.addressList.isValid() { + return + } + var lastErr error + for valid := true; valid; valid = b.addressList.increment() { + curAddr := b.addressList.currentAddress() + sd, ok := b.subConns.Get(curAddr) + if !ok { + var err error + // We want to assign the new scData to sd from the outer scope, + // hence we can't use := below. + sd, err = b.newSCData(curAddr) + if err != nil { + // This should never happen, unless the clientConn is being shut + // down. + if b.logger.V(2) { + b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err) + } + // Do nothing, the LB policy will be closed soon. + return + } + b.subConns.Set(curAddr, sd) + } + + switch sd.rawConnectivityState { + case connectivity.Idle: + sd.subConn.Connect() + b.scheduleNextConnectionLocked() + return + case connectivity.TransientFailure: + // The SubConn is being re-used and failed during a previous pass + // over the addressList. It has not completed backoff yet. + // Mark it as having failed and try the next address. + sd.connectionFailedInFirstPass = true + lastErr = sd.lastErr + continue + case connectivity.Connecting: + // Wait for the connection attempt to complete or the timer to fire + // before attempting the next address. + b.scheduleNextConnectionLocked() + return + default: + b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", sd.rawConnectivityState) + return + + } + } + + // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the + // first pass if possible. + b.endFirstPassIfPossibleLocked(lastErr) +} + +func (b *pickfirstBalancer) scheduleNextConnectionLocked() { + b.cancelConnectionTimer() + if !b.addressList.hasNext() { + return } - if b.subConn != subConn { + curAddr := b.addressList.currentAddress() + cancelled := false // Access to this is protected by the balancer's mutex. + closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() { + b.mu.Lock() + defer b.mu.Unlock() + // If the scheduled task is cancelled while acquiring the mutex, return. 
+		if cancelled {
+			return
+		}
 		if b.logger.V(2) {
-			b.logger.Infof("Ignored state change because subConn is not recognized")
+			b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr)
+		}
+		if b.addressList.increment() {
+			b.requestConnectionLocked()
 		}
+	})
+	// Access to the cancellation callback held by the balancer is guarded by
+	// the balancer's mutex, so it's safe to set the boolean from the callback.
+	b.cancelConnectionTimer = sync.OnceFunc(func() {
+		cancelled = true
+		closeFn()
+	})
+}
+
+func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	oldState := sd.rawConnectivityState
+	sd.rawConnectivityState = newState.ConnectivityState
+	// Previously relevant SubConns can still callback with state updates.
+	// To prevent pickers from returning these obsolete SubConns, this logic
+	// is included to check if the current list of active SubConns includes this
+	// SubConn.
+	if !b.isActiveSCData(sd) {
 		return
 	}
-	if state.ConnectivityState == connectivity.Shutdown {
-		b.subConn = nil
+	if newState.ConnectivityState == connectivity.Shutdown {
+		sd.effectiveState = connectivity.Shutdown
 		return
 	}
 
-	switch state.ConnectivityState {
-	case connectivity.Ready:
-		b.cc.UpdateState(balancer.State{
-			ConnectivityState: state.ConnectivityState,
-			Picker:            &picker{result: balancer.PickResult{SubConn: subConn}},
-		})
-	case connectivity.Connecting:
-		if b.state == connectivity.TransientFailure {
-			// We stay in TransientFailure until we are Ready. See A62.
+	// Record a connection attempt when exiting CONNECTING.
+	if newState.ConnectivityState == connectivity.TransientFailure {
+		sd.connectionFailedInFirstPass = true
+		connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target)
+	}
+
+	if newState.ConnectivityState == connectivity.Ready {
+		connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
+		b.shutdownRemainingLocked(sd)
+		if !b.addressList.seekTo(sd.addr) {
+			// This should not fail as we should have only one SubConn after
+			// entering READY. The SubConn should be present in the addressList.
+			b.logger.Errorf("Address %q not found in address list %v", sd.addr, b.addressList.addresses)
 			return
 		}
-		b.cc.UpdateState(balancer.State{
-			ConnectivityState: state.ConnectivityState,
+		if !b.healthCheckingEnabled {
+			if b.logger.V(2) {
+				b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn)
+			}
+
+			sd.effectiveState = connectivity.Ready
+			b.updateBalancerState(balancer.State{
+				ConnectivityState: connectivity.Ready,
+				Picker:            &picker{result: balancer.PickResult{SubConn: sd.subConn}},
+			})
+			return
+		}
+		if b.logger.V(2) {
+			b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn)
+		}
+		// Send a CONNECTING update to take the SubConn out of sticky-TF if
+		// required.
+		sd.effectiveState = connectivity.Connecting
+		b.updateBalancerState(balancer.State{
+			ConnectivityState: connectivity.Connecting,
 			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
 		})
+		sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) {
+			b.updateSubConnHealthState(sd, scs)
+		})
+		return
+	}
+
+	// If the LB policy is READY, and it receives a subchannel state change,
+	// it means that the READY subchannel has failed.
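The timer above is cancelled through a closure plus a cancelled flag rather than by stopping a bare timer, because the callback may already be blocked on the mutex when cancellation happens. A standalone sketch of the same pattern, substituting time.AfterFunc for the vendored internal.TimeAfterFunc:

    package main

    import (
    	"fmt"
    	"sync"
    	"time"
    )

    func main() {
    	var mu sync.Mutex
    	cancelled := false // guarded by mu, like the balancer's flag above

    	timer := time.AfterFunc(250*time.Millisecond, func() {
    		mu.Lock()
    		defer mu.Unlock()
    		// The task may have been cancelled while we waited for the lock.
    		if cancelled {
    			return
    		}
    		fmt.Println("timer fired: would try the next address now")
    	})

    	// sync.OnceFunc makes cancellation idempotent, so it is safe to call
    	// from every code path that no longer needs the pending attempt.
    	cancel := sync.OnceFunc(func() {
    		mu.Lock()
    		cancelled = true
    		mu.Unlock()
    		timer.Stop()
    	})

    	cancel() // cancel before the timer fires
    	cancel() // second call is a no-op
    	time.Sleep(300 * time.Millisecond)
    	fmt.Println("done: callback was suppressed")
    }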
+ // A SubConn can also transition from CONNECTING directly to IDLE when + // a transport is successfully created, but the connection fails + // before the SubConn can send the notification for READY. We treat + // this as a successful connection and transition to IDLE. + // TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second + // part of the if condition below once the issue is fixed. + if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) { + // Once a transport fails, the balancer enters IDLE and starts from + // the first address when the picker is used. + b.shutdownRemainingLocked(sd) + sd.effectiveState = newState.ConnectivityState + // READY SubConn interspliced in between CONNECTING and IDLE, need to + // account for that. + if oldState == connectivity.Connecting { + // A known issue (https://github.com/grpc/grpc-go/issues/7862) + // causes a race that prevents the READY state change notification. + // This works around it. + connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) + } + disconnectionsMetric.Record(b.metricsRecorder, 1, b.target) + b.addressList.reset() + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Idle, + Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)}, + }) + return + } + + if b.firstPass { + switch newState.ConnectivityState { + case connectivity.Connecting: + // The effective state can be in either IDLE, CONNECTING or + // TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in + // TRANSIENT_FAILURE until it's READY. See A62. + if sd.effectiveState != connectivity.TransientFailure { + sd.effectiveState = connectivity.Connecting + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + } + case connectivity.TransientFailure: + sd.lastErr = newState.ConnectionError + sd.effectiveState = connectivity.TransientFailure + // Since we're re-using common SubConns while handling resolver + // updates, we could receive an out of turn TRANSIENT_FAILURE from + // a pass over the previous address list. Happy Eyeballs will also + // cause out of order updates to arrive. + + if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) { + b.cancelConnectionTimer() + if b.addressList.increment() { + b.requestConnectionLocked() + return + } + } + + // End the first pass if we've seen a TRANSIENT_FAILURE from all + // SubConns once. + b.endFirstPassIfPossibleLocked(newState.ConnectionError) + } + return + } + + // We have finished the first pass, keep re-connecting failing SubConns. + switch newState.ConnectivityState { + case connectivity.TransientFailure: + b.numTF = (b.numTF + 1) % b.subConns.Len() + sd.lastErr = newState.ConnectionError + if b.numTF%b.subConns.Len() == 0 { + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: newState.ConnectionError}, + }) + } + // We don't need to request re-resolution since the SubConn already + // does that before reporting TRANSIENT_FAILURE. + // TODO: #7534 - Move re-resolution requests from SubConn into + // pick_first. case connectivity.Idle: - if b.state == connectivity.TransientFailure { - // We stay in TransientFailure until we are Ready. Also kick the - // subConn out of Idle into Connecting. See A62. 
-			b.subConn.Connect()
+		sd.subConn.Connect()
+	}
+}
+
+// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the
+// addresses are tried and their SubConns have reported a failure.
+func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) {
+	// An optimization to avoid iterating over the entire SubConn map.
+	if b.addressList.isValid() {
+		return
+	}
+	// Connect() has been called on all the SubConns. The first pass can be
+	// ended if all the SubConns have reported a failure.
+	for _, sd := range b.subConns.Values() {
+		if !sd.connectionFailedInFirstPass {
 			return
 		}
-		b.cc.UpdateState(balancer.State{
-			ConnectivityState: state.ConnectivityState,
-			Picker:            &idlePicker{subConn: subConn},
+	}
+	b.firstPass = false
+	b.updateBalancerState(balancer.State{
+		ConnectivityState: connectivity.TransientFailure,
+		Picker:            &picker{err: lastErr},
+	})
+	// Start re-connecting all the SubConns that are already in IDLE.
+	for _, sd := range b.subConns.Values() {
+		if sd.rawConnectivityState == connectivity.Idle {
+			sd.subConn.Connect()
+		}
+	}
+}
+
+func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool {
+	activeSD, found := b.subConns.Get(sd.addr)
+	return found && activeSD == sd
+}
+
+func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	// Previously relevant SubConns can still callback with state updates.
+	// To prevent pickers from returning these obsolete SubConns, this logic
+	// is included to check if the current list of active SubConns includes
+	// this SubConn.
+	if !b.isActiveSCData(sd) {
+		return
+	}
+	sd.effectiveState = state.ConnectivityState
+	switch state.ConnectivityState {
+	case connectivity.Ready:
+		b.updateBalancerState(balancer.State{
+			ConnectivityState: connectivity.Ready,
+			Picker:            &picker{result: balancer.PickResult{SubConn: sd.subConn}},
 		})
 	case connectivity.TransientFailure:
-		b.cc.UpdateState(balancer.State{
-			ConnectivityState: state.ConnectivityState,
-			Picker:            &picker{err: state.ConnectionError},
+		b.updateBalancerState(balancer.State{
+			ConnectivityState: connectivity.TransientFailure,
+			Picker:            &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)},
+		})
+	case connectivity.Connecting:
+		b.updateBalancerState(balancer.State{
+			ConnectivityState: connectivity.Connecting,
+			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
 		})
+	default:
+		b.logger.Errorf("Got unexpected health update for SubConn %p: %v", sd.subConn, state)
 	}
-	b.state = state.ConnectivityState
 }
 
-func (b *pickfirstBalancer) Close() {
+// updateBalancerState stores the state reported to the channel and calls
+// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate
+// updates to the channel.
+func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) {
+	// In case of TransientFailures allow the picker to be updated to update
+	// the connectivity error, in all other cases don't send duplicate state
+	// updates.
+	if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure {
+		return
+	}
+	b.forceUpdateConcludedStateLocked(newState)
 }
 
-func (b *pickfirstBalancer) ExitIdle() {
-	if b.subConn != nil && b.state == connectivity.Idle {
-		b.subConn.Connect()
-	}
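The duplicate-suppression rule in updateBalancerState above forwards an update only when the state actually changes, with TRANSIENT_FAILURE exempted so a fresher connection error can still reach the picker. A tiny illustration of just that predicate (the names are invented for the sketch):

    package main

    import "fmt"

    type connState string

    // shouldForward mirrors the check in updateBalancerState: drop equal
    // states unless the balancer sits in TRANSIENT_FAILURE, where a new
    // picker can carry a more recent connection error.
    func shouldForward(current, next connState) bool {
    	return next != current || current == "TRANSIENT_FAILURE"
    }

    func main() {
    	fmt.Println(shouldForward("CONNECTING", "CONNECTING"))               // false: duplicate dropped
    	fmt.Println(shouldForward("CONNECTING", "READY"))                    // true: state changed
    	fmt.Println(shouldForward("TRANSIENT_FAILURE", "TRANSIENT_FAILURE")) // true: error refresh allowed
    }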
+// forceUpdateConcludedStateLocked stores the state reported to the channel and
+// calls ClientConn.UpdateState().
+// A separate function is defined to force update the ClientConn state since the
+// channel doesn't correctly assume that LB policies start in CONNECTING and
+// relies on LB policy to send an initial CONNECTING update.
+func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) {
+	b.state = newState.ConnectivityState
+	b.cc.UpdateState(newState)
 }
 
 type picker struct {
@@ -282,10 +822,87 @@ func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
 // idlePicker is used when the SubConn is IDLE and kicks the SubConn into
 // CONNECTING when Pick is called.
 type idlePicker struct {
-	subConn balancer.SubConn
+	exitIdle func()
 }
 
 func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
-	i.subConn.Connect()
+	i.exitIdle()
 	return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
 }
+
+// addressList manages sequentially iterating over addresses present in a list
+// of endpoints. It provides a 1 dimensional view of the addresses present in
+// the endpoints.
+// This type is not safe for concurrent access.
+type addressList struct {
+	addresses []resolver.Address
+	idx       int
+}
+
+func (al *addressList) isValid() bool {
+	return al.idx < len(al.addresses)
+}
+
+func (al *addressList) size() int {
+	return len(al.addresses)
+}
+
+// increment moves to the next index in the address list.
+// This method returns false if it went off the list, true otherwise.
+func (al *addressList) increment() bool {
+	if !al.isValid() {
+		return false
+	}
+	al.idx++
+	return al.idx < len(al.addresses)
+}
+
+// currentAddress returns the current address pointed to in the addressList.
+// If the list is in an invalid state, it returns an empty address instead.
+func (al *addressList) currentAddress() resolver.Address {
+	if !al.isValid() {
+		return resolver.Address{}
+	}
+	return al.addresses[al.idx]
+}
+
+func (al *addressList) reset() {
+	al.idx = 0
+}
+
+func (al *addressList) updateAddrs(addrs []resolver.Address) {
+	al.addresses = addrs
+	al.reset()
+}
+
+// seekTo returns false if the needle was not found and the current index was
+// left unchanged.
+func (al *addressList) seekTo(needle resolver.Address) bool {
+	for ai, addr := range al.addresses {
+		if !equalAddressIgnoringBalAttributes(&addr, &needle) {
+			continue
+		}
+		al.idx = ai
+		return true
+	}
+	return false
+}
+
+// hasNext returns whether incrementing the addressList will result in moving
+// past the end of the list. If the list has already moved past the end, it
+// returns false.
+func (al *addressList) hasNext() bool {
+	if !al.isValid() {
+		return false
+	}
+	return al.idx+1 < len(al.addresses)
+}
+
+// equalAddressIgnoringBalAttributes returns true if a and b are considered
+// equal. This is different from the Equal method on the resolver.Address type
+// which considers all fields to determine equality. Here, we only consider
+// fields that are meaningful to the SubConn.
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
+	return a.Addr == b.Addr && a.ServerName == b.ServerName &&
+		a.Attributes.Equal(b.Attributes)
+}
diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
deleted file mode 100644
index 494314f23..000000000
--- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
+++ /dev/null
@@ -1,927 +0,0 @@
-/*
- *
- * Copyright 2024 gRPC authors.
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package pickfirstleaf contains the pick_first load balancing policy which -// will be the universal leaf policy after dualstack changes are implemented. -// -// # Experimental -// -// Notice: This package is EXPERIMENTAL and may be changed or removed in a -// later release. -package pickfirstleaf - -import ( - "encoding/json" - "errors" - "fmt" - "net" - "net/netip" - "sync" - "time" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/pickfirst/internal" - "google.golang.org/grpc/connectivity" - expstats "google.golang.org/grpc/experimental/stats" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/envconfig" - internalgrpclog "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/serviceconfig" -) - -func init() { - if envconfig.NewPickFirstEnabled { - // Register as the default pick_first balancer. - Name = "pick_first" - } - balancer.Register(pickfirstBuilder{}) -} - -type ( - // enableHealthListenerKeyType is a unique key type used in resolver - // attributes to indicate whether the health listener usage is enabled. - enableHealthListenerKeyType struct{} - // managedByPickfirstKeyType is an attribute key type to inform Outlier - // Detection that the generic health listener is being used. - // TODO: https://github.com/grpc/grpc-go/issues/7915 - Remove this when - // implementing the dualstack design. This is a hack. Once Dualstack is - // completed, outlier detection will stop sending ejection updates through - // the connectivity listener. - managedByPickfirstKeyType struct{} -) - -var ( - logger = grpclog.Component("pick-first-leaf-lb") - // Name is the name of the pick_first_leaf balancer. - // It is changed to "pick_first" in init() if this balancer is to be - // registered as the default pickfirst. - Name = "pick_first_leaf" - disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ - Name: "grpc.lb.pick_first.disconnections", - Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.", - Unit: "disconnection", - Labels: []string{"grpc.target"}, - Default: false, - }) - connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ - Name: "grpc.lb.pick_first.connection_attempts_succeeded", - Description: "EXPERIMENTAL. Number of successful connection attempts.", - Unit: "attempt", - Labels: []string{"grpc.target"}, - Default: false, - }) - connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ - Name: "grpc.lb.pick_first.connection_attempts_failed", - Description: "EXPERIMENTAL. Number of failed connection attempts.", - Unit: "attempt", - Labels: []string{"grpc.target"}, - Default: false, - }) -) - -const ( - // TODO: change to pick-first when this becomes the default pick_first policy. 
- logPrefix = "[pick-first-leaf-lb %p] " - // connectionDelayInterval is the time to wait for during the happy eyeballs - // pass before starting the next connection attempt. - connectionDelayInterval = 250 * time.Millisecond -) - -type ipAddrFamily int - -const ( - // ipAddrFamilyUnknown represents strings that can't be parsed as an IP - // address. - ipAddrFamilyUnknown ipAddrFamily = iota - ipAddrFamilyV4 - ipAddrFamilyV6 -) - -type pickfirstBuilder struct{} - -func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer { - b := &pickfirstBalancer{ - cc: cc, - target: bo.Target.String(), - metricsRecorder: cc.MetricsRecorder(), - - subConns: resolver.NewAddressMapV2[*scData](), - state: connectivity.Connecting, - cancelConnectionTimer: func() {}, - } - b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) - return b -} - -func (b pickfirstBuilder) Name() string { - return Name -} - -func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - var cfg pfConfig - if err := json.Unmarshal(js, &cfg); err != nil { - return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) - } - return cfg, nil -} - -// EnableHealthListener updates the state to configure pickfirst for using a -// generic health listener. -func EnableHealthListener(state resolver.State) resolver.State { - state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true) - return state -} - -// IsManagedByPickfirst returns whether an address belongs to a SubConn -// managed by the pickfirst LB policy. -// TODO: https://github.com/grpc/grpc-go/issues/7915 - This is a hack to disable -// outlier_detection via the with connectivity listener when using pick_first. -// Once Dualstack changes are complete, all SubConns will be created by -// pick_first and outlier detection will only use the health listener for -// ejection. This hack can then be removed. -func IsManagedByPickfirst(addr resolver.Address) bool { - return addr.BalancerAttributes.Value(managedByPickfirstKeyType{}) != nil -} - -type pfConfig struct { - serviceconfig.LoadBalancingConfig `json:"-"` - - // If set to true, instructs the LB policy to shuffle the order of the list - // of endpoints received from the name resolver before attempting to - // connect to them. - ShuffleAddressList bool `json:"shuffleAddressList"` -} - -// scData keeps track of the current state of the subConn. -// It is not safe for concurrent access. -type scData struct { - // The following fields are initialized at build time and read-only after - // that. - subConn balancer.SubConn - addr resolver.Address - - rawConnectivityState connectivity.State - // The effective connectivity state based on raw connectivity, health state - // and after following sticky TransientFailure behaviour defined in A62. 
- effectiveState connectivity.State - lastErr error - connectionFailedInFirstPass bool -} - -func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { - addr.BalancerAttributes = addr.BalancerAttributes.WithValue(managedByPickfirstKeyType{}, true) - sd := &scData{ - rawConnectivityState: connectivity.Idle, - effectiveState: connectivity.Idle, - addr: addr, - } - sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{ - StateListener: func(state balancer.SubConnState) { - b.updateSubConnState(sd, state) - }, - }) - if err != nil { - return nil, err - } - sd.subConn = sc - return sd, nil -} - -type pickfirstBalancer struct { - // The following fields are initialized at build time and read-only after - // that and therefore do not need to be guarded by a mutex. - logger *internalgrpclog.PrefixLogger - cc balancer.ClientConn - target string - metricsRecorder expstats.MetricsRecorder // guaranteed to be non nil - - // The mutex is used to ensure synchronization of updates triggered - // from the idle picker and the already serialized resolver, - // SubConn state updates. - mu sync.Mutex - // State reported to the channel based on SubConn states and resolver - // updates. - state connectivity.State - // scData for active subonns mapped by address. - subConns *resolver.AddressMapV2[*scData] - addressList addressList - firstPass bool - numTF int - cancelConnectionTimer func() - healthCheckingEnabled bool -} - -// ResolverError is called by the ClientConn when the name resolver produces -// an error or when pickfirst determined the resolver update to be invalid. -func (b *pickfirstBalancer) ResolverError(err error) { - b.mu.Lock() - defer b.mu.Unlock() - b.resolverErrorLocked(err) -} - -func (b *pickfirstBalancer) resolverErrorLocked(err error) { - if b.logger.V(2) { - b.logger.Infof("Received error from the name resolver: %v", err) - } - - // The picker will not change since the balancer does not currently - // report an error. If the balancer hasn't received a single good resolver - // update yet, transition to TRANSIENT_FAILURE. - if b.state != connectivity.TransientFailure && b.addressList.size() > 0 { - if b.logger.V(2) { - b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.") - } - return - } - - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, - }) -} - -func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { - b.mu.Lock() - defer b.mu.Unlock() - b.cancelConnectionTimer() - if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { - // Cleanup state pertaining to the previous resolver state. - // Treat an empty address list like an error by calling b.ResolverError. 
-		b.closeSubConnsLocked()
-		b.addressList.updateAddrs(nil)
-		b.resolverErrorLocked(errors.New("produced zero addresses"))
-		return balancer.ErrBadResolverState
-	}
-	b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil
-	cfg, ok := state.BalancerConfig.(pfConfig)
-	if state.BalancerConfig != nil && !ok {
-		return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState)
-	}
-
-	if b.logger.V(2) {
-		b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
-	}
-
-	var newAddrs []resolver.Address
-	if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 {
-		// Perform the optional shuffling described in gRFC A62. The shuffling
-		// will change the order of endpoints but not touch the order of the
-		// addresses within each endpoint. - A61
-		if cfg.ShuffleAddressList {
-			endpoints = append([]resolver.Endpoint{}, endpoints...)
-			internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
-		}
-
-		// "Flatten the list by concatenating the ordered list of addresses for
-		// each of the endpoints, in order." - A61
-		for _, endpoint := range endpoints {
-			newAddrs = append(newAddrs, endpoint.Addresses...)
-		}
-	} else {
-		// Endpoints not set, process addresses until we migrate resolver
-		// emissions fully to Endpoints. The top channel does wrap emitted
-		// addresses with endpoints, however some balancers such as weighted
-		// target do not forward the corresponding correct endpoints down/split
-		// endpoints properly. Once all balancers correctly forward endpoints
-		// down, can delete this else conditional.
-		newAddrs = state.ResolverState.Addresses
-		if cfg.ShuffleAddressList {
-			newAddrs = append([]resolver.Address{}, newAddrs...)
-			internal.RandShuffle(len(newAddrs), func(i, j int) { newAddrs[i], newAddrs[j] = newAddrs[j], newAddrs[i] })
-		}
-	}
-
-	// If an address appears in multiple endpoints or in the same endpoint
-	// multiple times, we keep it only once. We will create only one SubConn
-	// for the address because an AddressMap is used to store SubConns.
-	// Not de-duplicating would result in attempting to connect to the same
-	// SubConn multiple times in the same pass. We don't want this.
-	newAddrs = deDupAddresses(newAddrs)
-	newAddrs = interleaveAddresses(newAddrs)
-
-	prevAddr := b.addressList.currentAddress()
-	prevSCData, found := b.subConns.Get(prevAddr)
-	prevAddrsCount := b.addressList.size()
-	isPrevRawConnectivityStateReady := found && prevSCData.rawConnectivityState == connectivity.Ready
-	b.addressList.updateAddrs(newAddrs)
-
-	// If the previous ready SubConn exists in new address list,
-	// keep this connection and don't create new SubConns.
-	if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) {
-		return nil
-	}
-
-	b.reconcileSubConnsLocked(newAddrs)
-	// If it's the first resolver update or the balancer was already READY
-	// (but the new address list does not contain the ready SubConn) or
-	// CONNECTING, enter CONNECTING.
-	// We may be in TRANSIENT_FAILURE due to a previous empty address list,
-	// we should still enter CONNECTING because the sticky TF behaviour
-	// mentioned in A62 applies only when the TRANSIENT_FAILURE is reported
-	// due to connectivity failures.
- if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 { - // Start connection attempt at first address. - b.forceUpdateConcludedStateLocked(balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, - }) - b.startFirstPassLocked() - } else if b.state == connectivity.TransientFailure { - // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until - // we're READY. See A62. - b.startFirstPassLocked() - } - return nil -} - -// UpdateSubConnState is unused as a StateListener is always registered when -// creating SubConns. -func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { - b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) -} - -func (b *pickfirstBalancer) Close() { - b.mu.Lock() - defer b.mu.Unlock() - b.closeSubConnsLocked() - b.cancelConnectionTimer() - b.state = connectivity.Shutdown -} - -// ExitIdle moves the balancer out of idle state. It can be called concurrently -// by the idlePicker and clientConn so access to variables should be -// synchronized. -func (b *pickfirstBalancer) ExitIdle() { - b.mu.Lock() - defer b.mu.Unlock() - if b.state == connectivity.Idle { - b.startFirstPassLocked() - } -} - -func (b *pickfirstBalancer) startFirstPassLocked() { - b.firstPass = true - b.numTF = 0 - // Reset the connection attempt record for existing SubConns. - for _, sd := range b.subConns.Values() { - sd.connectionFailedInFirstPass = false - } - b.requestConnectionLocked() -} - -func (b *pickfirstBalancer) closeSubConnsLocked() { - for _, sd := range b.subConns.Values() { - sd.subConn.Shutdown() - } - b.subConns = resolver.NewAddressMapV2[*scData]() -} - -// deDupAddresses ensures that each address appears only once in the slice. -func deDupAddresses(addrs []resolver.Address) []resolver.Address { - seenAddrs := resolver.NewAddressMapV2[*scData]() - retAddrs := []resolver.Address{} - - for _, addr := range addrs { - if _, ok := seenAddrs.Get(addr); ok { - continue - } - retAddrs = append(retAddrs, addr) - } - return retAddrs -} - -// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6) -// as per RFC-8305 section 4. -// Whichever address family is first in the list is followed by an address of -// the other address family; that is, if the first address in the list is IPv6, -// then the first IPv4 address should be moved up in the list to be second in -// the list. It doesn't support configuring "First Address Family Count", i.e. -// there will always be a single member of the first address family at the -// beginning of the interleaved list. -// Addresses that are neither IPv4 nor IPv6 are treated as part of a third -// "unknown" family for interleaving. 
-// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6 -func interleaveAddresses(addrs []resolver.Address) []resolver.Address { - familyAddrsMap := map[ipAddrFamily][]resolver.Address{} - interleavingOrder := []ipAddrFamily{} - for _, addr := range addrs { - family := addressFamily(addr.Addr) - if _, found := familyAddrsMap[family]; !found { - interleavingOrder = append(interleavingOrder, family) - } - familyAddrsMap[family] = append(familyAddrsMap[family], addr) - } - - interleavedAddrs := make([]resolver.Address, 0, len(addrs)) - - for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) { - // Some IP types may have fewer addresses than others, so we look for - // the next type that has a remaining member to add to the interleaved - // list. - family := interleavingOrder[curFamilyIdx] - remainingMembers := familyAddrsMap[family] - if len(remainingMembers) > 0 { - interleavedAddrs = append(interleavedAddrs, remainingMembers[0]) - familyAddrsMap[family] = remainingMembers[1:] - } - } - - return interleavedAddrs -} - -// addressFamily returns the ipAddrFamily after parsing the address string. -// If the address isn't of the format "ip-address:port", it returns -// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when -// using a resolver like passthrough where the address may be a hostname in -// some format that the dialer can resolve. -func addressFamily(address string) ipAddrFamily { - // Parse the IP after removing the port. - host, _, err := net.SplitHostPort(address) - if err != nil { - return ipAddrFamilyUnknown - } - ip, err := netip.ParseAddr(host) - if err != nil { - return ipAddrFamilyUnknown - } - switch { - case ip.Is4() || ip.Is4In6(): - return ipAddrFamilyV4 - case ip.Is6(): - return ipAddrFamilyV6 - default: - return ipAddrFamilyUnknown - } -} - -// reconcileSubConnsLocked updates the active subchannels based on a new address -// list from the resolver. It does this by: -// - closing subchannels: any existing subchannels associated with addresses -// that are no longer in the updated list are shut down. -// - removing subchannels: entries for these closed subchannels are removed -// from the subchannel map. -// -// This ensures that the subchannel map accurately reflects the current set of -// addresses received from the name resolver. -func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) { - newAddrsMap := resolver.NewAddressMapV2[bool]() - for _, addr := range newAddrs { - newAddrsMap.Set(addr, true) - } - - for _, oldAddr := range b.subConns.Keys() { - if _, ok := newAddrsMap.Get(oldAddr); ok { - continue - } - val, _ := b.subConns.Get(oldAddr) - val.subConn.Shutdown() - b.subConns.Delete(oldAddr) - } -} - -// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn -// becomes ready, which means that all other subConn must be shutdown. -func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) { - b.cancelConnectionTimer() - for _, sd := range b.subConns.Values() { - if sd.subConn != selected.subConn { - sd.subConn.Shutdown() - } - } - b.subConns = resolver.NewAddressMapV2[*scData]() - b.subConns.Set(selected.addr, selected) -} - -// requestConnectionLocked starts connecting on the subchannel corresponding to -// the current address. If no subchannel exists, one is created. If the current -// subchannel is in TransientFailure, a connection to the next address is -// attempted until a subchannel is found. 
-func (b *pickfirstBalancer) requestConnectionLocked() { - if !b.addressList.isValid() { - return - } - var lastErr error - for valid := true; valid; valid = b.addressList.increment() { - curAddr := b.addressList.currentAddress() - sd, ok := b.subConns.Get(curAddr) - if !ok { - var err error - // We want to assign the new scData to sd from the outer scope, - // hence we can't use := below. - sd, err = b.newSCData(curAddr) - if err != nil { - // This should never happen, unless the clientConn is being shut - // down. - if b.logger.V(2) { - b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err) - } - // Do nothing, the LB policy will be closed soon. - return - } - b.subConns.Set(curAddr, sd) - } - - switch sd.rawConnectivityState { - case connectivity.Idle: - sd.subConn.Connect() - b.scheduleNextConnectionLocked() - return - case connectivity.TransientFailure: - // The SubConn is being re-used and failed during a previous pass - // over the addressList. It has not completed backoff yet. - // Mark it as having failed and try the next address. - sd.connectionFailedInFirstPass = true - lastErr = sd.lastErr - continue - case connectivity.Connecting: - // Wait for the connection attempt to complete or the timer to fire - // before attempting the next address. - b.scheduleNextConnectionLocked() - return - default: - b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", sd.rawConnectivityState) - return - - } - } - - // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the - // first pass if possible. - b.endFirstPassIfPossibleLocked(lastErr) -} - -func (b *pickfirstBalancer) scheduleNextConnectionLocked() { - b.cancelConnectionTimer() - if !b.addressList.hasNext() { - return - } - curAddr := b.addressList.currentAddress() - cancelled := false // Access to this is protected by the balancer's mutex. - closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() { - b.mu.Lock() - defer b.mu.Unlock() - // If the scheduled task is cancelled while acquiring the mutex, return. - if cancelled { - return - } - if b.logger.V(2) { - b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr) - } - if b.addressList.increment() { - b.requestConnectionLocked() - } - }) - // Access to the cancellation callback held by the balancer is guarded by - // the balancer's mutex, so it's safe to set the boolean from the callback. - b.cancelConnectionTimer = sync.OnceFunc(func() { - cancelled = true - closeFn() - }) -} - -func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) { - b.mu.Lock() - defer b.mu.Unlock() - oldState := sd.rawConnectivityState - sd.rawConnectivityState = newState.ConnectivityState - // Previously relevant SubConns can still callback with state updates. - // To prevent pickers from returning these obsolete SubConns, this logic - // is included to check if the current list of active SubConns includes this - // SubConn. - if !b.isActiveSCData(sd) { - return - } - if newState.ConnectivityState == connectivity.Shutdown { - sd.effectiveState = connectivity.Shutdown - return - } - - // Record a connection attempt when exiting CONNECTING. 
- if newState.ConnectivityState == connectivity.TransientFailure { - sd.connectionFailedInFirstPass = true - connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target) - } - - if newState.ConnectivityState == connectivity.Ready { - connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) - b.shutdownRemainingLocked(sd) - if !b.addressList.seekTo(sd.addr) { - // This should not fail as we should have only one SubConn after - // entering READY. The SubConn should be present in the addressList. - b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses) - return - } - if !b.healthCheckingEnabled { - if b.logger.V(2) { - b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn) - } - - sd.effectiveState = connectivity.Ready - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Ready, - Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, - }) - return - } - if b.logger.V(2) { - b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn) - } - // Send a CONNECTING update to take the SubConn out of sticky-TF if - // required. - sd.effectiveState = connectivity.Connecting - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, - }) - sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) { - b.updateSubConnHealthState(sd, scs) - }) - return - } - - // If the LB policy is READY, and it receives a subchannel state change, - // it means that the READY subchannel has failed. - // A SubConn can also transition from CONNECTING directly to IDLE when - // a transport is successfully created, but the connection fails - // before the SubConn can send the notification for READY. We treat - // this as a successful connection and transition to IDLE. - // TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second - // part of the if condition below once the issue is fixed. - if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) { - // Once a transport fails, the balancer enters IDLE and starts from - // the first address when the picker is used. - b.shutdownRemainingLocked(sd) - sd.effectiveState = newState.ConnectivityState - // READY SubConn interspliced in between CONNECTING and IDLE, need to - // account for that. - if oldState == connectivity.Connecting { - // A known issue (https://github.com/grpc/grpc-go/issues/7862) - // causes a race that prevents the READY state change notification. - // This works around it. - connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) - } - disconnectionsMetric.Record(b.metricsRecorder, 1, b.target) - b.addressList.reset() - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Idle, - Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)}, - }) - return - } - - if b.firstPass { - switch newState.ConnectivityState { - case connectivity.Connecting: - // The effective state can be in either IDLE, CONNECTING or - // TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in - // TRANSIENT_FAILURE until it's READY. See A62. 
- if sd.effectiveState != connectivity.TransientFailure { - sd.effectiveState = connectivity.Connecting - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, - }) - } - case connectivity.TransientFailure: - sd.lastErr = newState.ConnectionError - sd.effectiveState = connectivity.TransientFailure - // Since we're re-using common SubConns while handling resolver - // updates, we could receive an out of turn TRANSIENT_FAILURE from - // a pass over the previous address list. Happy Eyeballs will also - // cause out of order updates to arrive. - - if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) { - b.cancelConnectionTimer() - if b.addressList.increment() { - b.requestConnectionLocked() - return - } - } - - // End the first pass if we've seen a TRANSIENT_FAILURE from all - // SubConns once. - b.endFirstPassIfPossibleLocked(newState.ConnectionError) - } - return - } - - // We have finished the first pass, keep re-connecting failing SubConns. - switch newState.ConnectivityState { - case connectivity.TransientFailure: - b.numTF = (b.numTF + 1) % b.subConns.Len() - sd.lastErr = newState.ConnectionError - if b.numTF%b.subConns.Len() == 0 { - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: newState.ConnectionError}, - }) - } - // We don't need to request re-resolution since the SubConn already - // does that before reporting TRANSIENT_FAILURE. - // TODO: #7534 - Move re-resolution requests from SubConn into - // pick_first. - case connectivity.Idle: - sd.subConn.Connect() - } -} - -// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the -// addresses are tried and their SubConns have reported a failure. -func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) { - // An optimization to avoid iterating over the entire SubConn map. - if b.addressList.isValid() { - return - } - // Connect() has been called on all the SubConns. The first pass can be - // ended if all the SubConns have reported a failure. - for _, sd := range b.subConns.Values() { - if !sd.connectionFailedInFirstPass { - return - } - } - b.firstPass = false - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: lastErr}, - }) - // Start re-connecting all the SubConns that are already in IDLE. - for _, sd := range b.subConns.Values() { - if sd.rawConnectivityState == connectivity.Idle { - sd.subConn.Connect() - } - } -} - -func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool { - activeSD, found := b.subConns.Get(sd.addr) - return found && activeSD == sd -} - -func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) { - b.mu.Lock() - defer b.mu.Unlock() - // Previously relevant SubConns can still callback with state updates. - // To prevent pickers from returning these obsolete SubConns, this logic - // is included to check if the current list of active SubConns includes - // this SubConn. 
- if !b.isActiveSCData(sd) { - return - } - sd.effectiveState = state.ConnectivityState - switch state.ConnectivityState { - case connectivity.Ready: - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Ready, - Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, - }) - case connectivity.TransientFailure: - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)}, - }) - case connectivity.Connecting: - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, - }) - default: - b.logger.Errorf("Got unexpected health update for SubConn %p: %v", state) - } -} - -// updateBalancerState stores the state reported to the channel and calls -// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate -// updates to the channel. -func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) { - // In case of TransientFailures allow the picker to be updated to update - // the connectivity error, in all other cases don't send duplicate state - // updates. - if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure { - return - } - b.forceUpdateConcludedStateLocked(newState) -} - -// forceUpdateConcludedStateLocked stores the state reported to the channel and -// calls ClientConn.UpdateState(). -// A separate function is defined to force update the ClientConn state since the -// channel doesn't correctly assume that LB policies start in CONNECTING and -// relies on LB policy to send an initial CONNECTING update. -func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) { - b.state = newState.ConnectivityState - b.cc.UpdateState(newState) -} - -type picker struct { - result balancer.PickResult - err error -} - -func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - return p.result, p.err -} - -// idlePicker is used when the SubConn is IDLE and kicks the SubConn into -// CONNECTING when Pick is called. -type idlePicker struct { - exitIdle func() -} - -func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - i.exitIdle() - return balancer.PickResult{}, balancer.ErrNoSubConnAvailable -} - -// addressList manages sequentially iterating over addresses present in a list -// of endpoints. It provides a 1 dimensional view of the addresses present in -// the endpoints. -// This type is not safe for concurrent access. -type addressList struct { - addresses []resolver.Address - idx int -} - -func (al *addressList) isValid() bool { - return al.idx < len(al.addresses) -} - -func (al *addressList) size() int { - return len(al.addresses) -} - -// increment moves to the next index in the address list. -// This method returns false if it went off the list, true otherwise. -func (al *addressList) increment() bool { - if !al.isValid() { - return false - } - al.idx++ - return al.idx < len(al.addresses) -} - -// currentAddress returns the current address pointed to in the addressList. -// If the list is in an invalid state, it returns an empty address instead. 
-func (al *addressList) currentAddress() resolver.Address { - if !al.isValid() { - return resolver.Address{} - } - return al.addresses[al.idx] -} - -func (al *addressList) reset() { - al.idx = 0 -} - -func (al *addressList) updateAddrs(addrs []resolver.Address) { - al.addresses = addrs - al.reset() -} - -// seekTo returns false if the needle was not found and the current index was -// left unchanged. -func (al *addressList) seekTo(needle resolver.Address) bool { - for ai, addr := range al.addresses { - if !equalAddressIgnoringBalAttributes(&addr, &needle) { - continue - } - al.idx = ai - return true - } - return false -} - -// hasNext returns whether incrementing the addressList will result in moving -// past the end of the list. If the list has already moved past the end, it -// returns false. -func (al *addressList) hasNext() bool { - if !al.isValid() { - return false - } - return al.idx+1 < len(al.addresses) -} - -// equalAddressIgnoringBalAttributes returns true is a and b are considered -// equal. This is different from the Equal method on the resolver.Address type -// which considers all fields to determine equality. Here, we only consider -// fields that are meaningful to the SubConn. -func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool { - return a.Addr == b.Addr && a.ServerName == b.ServerName && - a.Attributes.Equal(b.Attributes) -} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index 35da5d1ec..22e6e3267 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -26,7 +26,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/endpointsharding" - "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" + "google.golang.org/grpc/balancer/pickfirst" "google.golang.org/grpc/grpclog" internalgrpclog "google.golang.org/grpc/internal/grpclog" ) @@ -47,7 +47,7 @@ func (bb builder) Name() string { } func (bb builder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - childBuilder := balancer.Get(pickfirstleaf.Name).Build + childBuilder := balancer.Get(pickfirst.Name).Build bal := &rrBalancer{ cc: cc, Balancer: endpointsharding.NewBalancer(cc, opts, childBuilder, endpointsharding.Options{}), @@ -67,13 +67,6 @@ func (b *rrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { return b.Balancer.UpdateClientConnState(balancer.ClientConnState{ // Enable the health listener in pickfirst children for client side health // checks and outlier detection, if configured. - ResolverState: pickfirstleaf.EnableHealthListener(ccs.ResolverState), + ResolverState: pickfirst.EnableHealthListener(ccs.ResolverState), }) } - -func (b *rrBalancer) ExitIdle() { - // Should always be ok, as child is endpoint sharding. 
- if ei, ok := b.Balancer.(balancer.ExitIdler); ok { - ei.ExitIdle() - } -} diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go index 948a21ef6..2c760e623 100644 --- a/vendor/google.golang.org/grpc/balancer_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -450,13 +450,14 @@ func (acbw *acBalancerWrapper) healthListenerRegFn() func(context.Context, func( if acbw.ccb.cc.dopts.disableHealthCheck { return noOpRegisterHealthListenerFn } + cfg := acbw.ac.cc.healthCheckConfig() + if cfg == nil { + return noOpRegisterHealthListenerFn + } regHealthLisFn := internal.RegisterClientHealthCheckListener if regHealthLisFn == nil { // The health package is not imported. - return noOpRegisterHealthListenerFn - } - cfg := acbw.ac.cc.healthCheckConfig() - if cfg == nil { + channelz.Error(logger, acbw.ac.channelz, "Health check is requested but health package is not imported.") return noOpRegisterHealthListenerFn } return func(ctx context.Context, listener func(balancer.SubConnState)) func() { diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 825c31795..42c61cf9f 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.5 +// protoc-gen-go v1.36.10 // protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto @@ -858,133 +858,68 @@ func (x *Address) GetIpPort() uint32 { var File_grpc_binlog_v1_binarylog_proto protoreflect.FileDescriptor -var file_grpc_binlog_v1_binarylog_proto_rawDesc = string([]byte{ - 0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31, - 0x2f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x11, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, - 0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbb, 0x07, 0x0a, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, - 0x17, 0x0a, 0x07, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x06, 0x63, 0x61, 0x6c, 0x6c, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x65, 0x71, 0x75, - 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x5f, 0x63, - 0x61, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x73, 0x65, 0x71, 0x75, 0x65, - 0x6e, 0x63, 0x65, 0x49, 0x64, 0x57, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x43, 0x61, 0x6c, 0x6c, 0x12, - 0x3d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 
0x28, 0x0e, 0x32, 0x29, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, - 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3e, - 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, - 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, - 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x52, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x46, - 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, - 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, - 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, - 0x52, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x36, - 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, - 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, - 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, - 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x69, - 0x6c, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x2b, - 0x0a, 0x11, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x61, 0x79, 0x6c, 0x6f, - 0x61, 0x64, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x04, 0x70, - 0x65, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x22, 0xf5, 0x01, 0x0a, 0x09, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x45, 0x56, 0x45, - 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x01, 0x12, - 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, - 0x52, 0x56, 0x45, 0x52, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x12, 0x1d, 0x0a, - 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 
0x45, - 0x4e, 0x54, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x1d, 0x0a, 0x19, - 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, - 0x52, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x04, 0x12, 0x20, 0x0a, 0x1c, 0x45, - 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, - 0x5f, 0x48, 0x41, 0x4c, 0x46, 0x5f, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x10, 0x05, 0x12, 0x1d, 0x0a, - 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, - 0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, - 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, - 0x4c, 0x10, 0x07, 0x22, 0x42, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x12, 0x0a, - 0x0e, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x43, 0x4c, 0x49, 0x45, - 0x4e, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x53, - 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, - 0x61, 0x64, 0x22, 0xbb, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, - 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b, - 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, - 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x74, - 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x22, 0x47, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, - 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, - 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xb1, 0x01, 0x0a, 0x07, 0x54, 0x72, - 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, - 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, - 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, - 0x25, 0x0a, 0x0e, 
0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x35, 0x0a, - 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, - 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, - 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, - 0x64, 0x61, 0x74, 0x61, 0x22, 0x42, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x12, 0x36, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, - 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x37, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x22, 0xb8, 0x01, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x33, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, - 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, - 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x07, - 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69, - 0x70, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x45, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, - 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x34, 0x10, 0x01, 0x12, 0x0d, - 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x02, 0x12, 0x0d, 0x0a, - 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x49, 0x58, 0x10, 0x03, 0x42, 0x5c, 0x0a, 0x14, - 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, - 0x67, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x4c, 0x6f, 0x67, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, - 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x69, - 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -}) +const file_grpc_binlog_v1_binarylog_proto_rawDesc = "" + + "\n" + + "\x1egrpc/binlog/v1/binarylog.proto\x12\x11grpc.binarylog.v1\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xbb\a\n" + + 
"\fGrpcLogEntry\x128\n" + + "\ttimestamp\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\ttimestamp\x12\x17\n" + + "\acall_id\x18\x02 \x01(\x04R\x06callId\x125\n" + + "\x17sequence_id_within_call\x18\x03 \x01(\x04R\x14sequenceIdWithinCall\x12=\n" + + "\x04type\x18\x04 \x01(\x0e2).grpc.binarylog.v1.GrpcLogEntry.EventTypeR\x04type\x12>\n" + + "\x06logger\x18\x05 \x01(\x0e2&.grpc.binarylog.v1.GrpcLogEntry.LoggerR\x06logger\x12F\n" + + "\rclient_header\x18\x06 \x01(\v2\x1f.grpc.binarylog.v1.ClientHeaderH\x00R\fclientHeader\x12F\n" + + "\rserver_header\x18\a \x01(\v2\x1f.grpc.binarylog.v1.ServerHeaderH\x00R\fserverHeader\x126\n" + + "\amessage\x18\b \x01(\v2\x1a.grpc.binarylog.v1.MessageH\x00R\amessage\x126\n" + + "\atrailer\x18\t \x01(\v2\x1a.grpc.binarylog.v1.TrailerH\x00R\atrailer\x12+\n" + + "\x11payload_truncated\x18\n" + + " \x01(\bR\x10payloadTruncated\x12.\n" + + "\x04peer\x18\v \x01(\v2\x1a.grpc.binarylog.v1.AddressR\x04peer\"\xf5\x01\n" + + "\tEventType\x12\x16\n" + + "\x12EVENT_TYPE_UNKNOWN\x10\x00\x12\x1c\n" + + "\x18EVENT_TYPE_CLIENT_HEADER\x10\x01\x12\x1c\n" + + "\x18EVENT_TYPE_SERVER_HEADER\x10\x02\x12\x1d\n" + + "\x19EVENT_TYPE_CLIENT_MESSAGE\x10\x03\x12\x1d\n" + + "\x19EVENT_TYPE_SERVER_MESSAGE\x10\x04\x12 \n" + + "\x1cEVENT_TYPE_CLIENT_HALF_CLOSE\x10\x05\x12\x1d\n" + + "\x19EVENT_TYPE_SERVER_TRAILER\x10\x06\x12\x15\n" + + "\x11EVENT_TYPE_CANCEL\x10\a\"B\n" + + "\x06Logger\x12\x12\n" + + "\x0eLOGGER_UNKNOWN\x10\x00\x12\x11\n" + + "\rLOGGER_CLIENT\x10\x01\x12\x11\n" + + "\rLOGGER_SERVER\x10\x02B\t\n" + + "\apayload\"\xbb\x01\n" + + "\fClientHeader\x127\n" + + "\bmetadata\x18\x01 \x01(\v2\x1b.grpc.binarylog.v1.MetadataR\bmetadata\x12\x1f\n" + + "\vmethod_name\x18\x02 \x01(\tR\n" + + "methodName\x12\x1c\n" + + "\tauthority\x18\x03 \x01(\tR\tauthority\x123\n" + + "\atimeout\x18\x04 \x01(\v2\x19.google.protobuf.DurationR\atimeout\"G\n" + + "\fServerHeader\x127\n" + + "\bmetadata\x18\x01 \x01(\v2\x1b.grpc.binarylog.v1.MetadataR\bmetadata\"\xb1\x01\n" + + "\aTrailer\x127\n" + + "\bmetadata\x18\x01 \x01(\v2\x1b.grpc.binarylog.v1.MetadataR\bmetadata\x12\x1f\n" + + "\vstatus_code\x18\x02 \x01(\rR\n" + + "statusCode\x12%\n" + + "\x0estatus_message\x18\x03 \x01(\tR\rstatusMessage\x12%\n" + + "\x0estatus_details\x18\x04 \x01(\fR\rstatusDetails\"5\n" + + "\aMessage\x12\x16\n" + + "\x06length\x18\x01 \x01(\rR\x06length\x12\x12\n" + + "\x04data\x18\x02 \x01(\fR\x04data\"B\n" + + "\bMetadata\x126\n" + + "\x05entry\x18\x01 \x03(\v2 .grpc.binarylog.v1.MetadataEntryR\x05entry\"7\n" + + "\rMetadataEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\fR\x05value\"\xb8\x01\n" + + "\aAddress\x123\n" + + "\x04type\x18\x01 \x01(\x0e2\x1f.grpc.binarylog.v1.Address.TypeR\x04type\x12\x18\n" + + "\aaddress\x18\x02 \x01(\tR\aaddress\x12\x17\n" + + "\aip_port\x18\x03 \x01(\rR\x06ipPort\"E\n" + + "\x04Type\x12\x10\n" + + "\fTYPE_UNKNOWN\x10\x00\x12\r\n" + + "\tTYPE_IPV4\x10\x01\x12\r\n" + + "\tTYPE_IPV6\x10\x02\x12\r\n" + + "\tTYPE_UNIX\x10\x03B\\\n" + + "\x14io.grpc.binarylog.v1B\x0eBinaryLogProtoP\x01Z2google.golang.org/grpc/binarylog/grpc_binarylog_v1b\x06proto3" var ( file_grpc_binlog_v1_binarylog_proto_rawDescOnce sync.Once diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 4f350ca56..c0c2c9a76 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -40,11 +40,12 @@ import ( "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/idle" 
iresolver "google.golang.org/grpc/internal/resolver" - "google.golang.org/grpc/internal/stats" + istats "google.golang.org/grpc/internal/stats" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/stats" "google.golang.org/grpc/status" _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. @@ -208,9 +209,10 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority) cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz) - cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) + cc.pickerWrapper = newPickerWrapper() - cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers) + cc.metricsRecorderList = istats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers) + cc.statsHandler = istats.NewCombinedHandler(cc.dopts.copts.StatsHandlers...) cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout) @@ -456,7 +458,7 @@ func (cc *ClientConn) validateTransportCredentials() error { func (cc *ClientConn) channelzRegistration(target string) { parentChannel, _ := cc.dopts.channelzParent.(*channelz.Channel) cc.channelz = channelz.RegisterChannel(parentChannel, target) - cc.addTraceEvent("created") + cc.addTraceEvent(fmt.Sprintf("created for target %q", target)) } // chainUnaryClientInterceptors chains all unary client interceptors into one. @@ -621,7 +623,8 @@ type ClientConn struct { channelz *channelz.Channel // Channelz object. resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder(). idlenessMgr *idle.Manager - metricsRecorderList *stats.MetricsRecorderList + metricsRecorderList *istats.MetricsRecorderList + statsHandler stats.Handler // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. @@ -689,22 +692,31 @@ func (cc *ClientConn) Connect() { cc.mu.Unlock() } -// waitForResolvedAddrs blocks until the resolver has provided addresses or the -// context expires. Returns nil unless the context expires first; otherwise -// returns a status error based on the context. -func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { +// waitForResolvedAddrs blocks until the resolver provides addresses or the +// context expires, whichever happens first. +// +// Error is nil unless the context expires first; otherwise returns a status +// error based on the context. +// +// The returned boolean indicates whether it did block or not. If the +// resolution has already happened once before, it returns false without +// blocking. Otherwise, it wait for the resolution and return true if +// resolution has succeeded or return false along with error if resolution has +// failed. +func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) (bool, error) { // This is on the RPC path, so we use a fast path to avoid the // more-expensive "select" below after the resolver has returned once. 
if cc.firstResolveEvent.HasFired() { - return nil + return false, nil } + internal.NewStreamWaitingForResolver() select { case <-cc.firstResolveEvent.Done(): - return nil + return true, nil case <-ctx.Done(): - return status.FromContextError(ctx.Err()).Err() + return false, status.FromContextError(ctx.Err()).Err() case <-cc.ctx.Done(): - return ErrClientConnClosing + return false, ErrClientConnClosing } } @@ -1067,13 +1079,6 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { return cc.sc.healthCheckConfig } -func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { - return cc.pickerWrapper.pick(ctx, failfast, balancer.PickInfo{ - Ctx: ctx, - FullMethodName: method, - }) -} - func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector) { if sc == nil { // should never reach here. @@ -1822,7 +1827,7 @@ func (cc *ClientConn) initAuthority() error { } else if auth, ok := cc.resolverBuilder.(resolver.AuthorityOverrider); ok { cc.authority = auth.OverrideAuthority(cc.parsedTarget) } else if strings.HasPrefix(endpoint, ":") { - cc.authority = "localhost" + endpoint + cc.authority = "localhost" + encodeAuthority(endpoint) } else { cc.authority = encodeAuthority(endpoint) } diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 665e790bb..06f6c6c70 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -44,8 +44,7 @@ type PerRPCCredentials interface { // A54). uri is the URI of the entry point for the request. When supported // by the underlying implementation, ctx can be used for timeout and // cancellation. Additionally, RequestInfo data will be available via ctx - // to this call. TODO(zhaoq): Define the set of the qualified keys instead - // of leaving it as an arbitrary string. + // to this call. GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) // RequireTransportSecurity indicates whether the credentials requires // transport security. @@ -96,10 +95,11 @@ func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo { return c } -// ProtocolInfo provides information regarding the gRPC wire protocol version, -// security protocol, security protocol version in use, server name, etc. +// ProtocolInfo provides static information regarding transport credentials. type ProtocolInfo struct { // ProtocolVersion is the gRPC wire protocol version. + // + // Deprecated: this is unused by gRPC. ProtocolVersion string // SecurityProtocol is the security protocol in use. SecurityProtocol string @@ -109,7 +109,16 @@ type ProtocolInfo struct { // // Deprecated: please use Peer.AuthInfo. SecurityVersion string - // ServerName is the user-configured server name. + // ServerName is the user-configured server name. If set, this overrides + // the default :authority header used for all RPCs on the channel using the + // containing credentials, unless grpc.WithAuthority is set on the channel, + // in which case that setting will take precedence. + // + // This must be a valid `:authority` header according to + // [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2). + // + // Deprecated: Users should use grpc.WithAuthority to override the authority + // on a channel instead of configuring the credentials. 
ServerName string } @@ -120,6 +129,20 @@ type AuthInfo interface { AuthType() string } +// AuthorityValidator validates the authority used to override the `:authority` +// header. This is an optional interface that implementations of AuthInfo can +// implement if they support per-RPC authority overrides. It is invoked when the +// application attempts to override the HTTP/2 `:authority` header using the +// CallAuthority call option. +type AuthorityValidator interface { + // ValidateAuthority checks the authority value used to override the + // `:authority` header. The authority parameter is the override value + // provided by the application via the CallAuthority option. This value + // typically corresponds to the server hostname or endpoint the RPC is + // targeting. It returns non-nil error if the validation fails. + ValidateAuthority(authority string) error +} + // ErrConnDispatched indicates that rawConn has been dispatched out of gRPC // and the caller should not close rawConn. var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC") @@ -159,12 +182,17 @@ type TransportCredentials interface { // Clone makes a copy of this TransportCredentials. Clone() TransportCredentials // OverrideServerName specifies the value used for the following: + // // - verifying the hostname on the returned certificates // - as SNI in the client's handshake to support virtual hosting // - as the value for `:authority` header at stream creation time // - // Deprecated: use grpc.WithAuthority instead. Will be supported - // throughout 1.x. + // The provided string should be a valid `:authority` header according to + // [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2). + // + // Deprecated: this method is unused by gRPC. Users should use + // grpc.WithAuthority to override the authority on a channel instead of + // configuring the credentials. OverrideServerName(string) error } @@ -207,14 +235,32 @@ type RequestInfo struct { AuthInfo AuthInfo } +// requestInfoKey is a struct to be used as the key to store RequestInfo in a +// context. +type requestInfoKey struct{} + // RequestInfoFromContext extracts the RequestInfo from the context if it exists. // // This API is experimental. func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) { - ri, ok = icredentials.RequestInfoFromContext(ctx).(RequestInfo) + ri, ok = ctx.Value(requestInfoKey{}).(RequestInfo) return ri, ok } +// NewContextWithRequestInfo creates a new context from ctx and attaches ri to it. +// +// This RequestInfo will be accessible via RequestInfoFromContext. +// +// Intended to be used from tests for PerRPCCredentials implementations (that +// often need to check connection's SecurityLevel). Should not be used from +// non-test code: the gRPC client already prepares a context with the correct +// RequestInfo attached when calling PerRPCCredentials.GetRequestMetadata. +// +// This API is experimental. +func NewContextWithRequestInfo(ctx context.Context, ri RequestInfo) context.Context { + return context.WithValue(ctx, requestInfoKey{}, ri) +} + // ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes // it possible to pass arbitrary data to the handshaker from gRPC, resolver, // balancer etc. 
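A minimal client-side sketch of the per-RPC authority override that the new AuthorityValidator interface above gates, assuming the grpc-go version vendored by this bump. The target, service, and override value are hypothetical; grpc.CallAuthority is the experimental call option referenced by the WithAuthority doc change in the dialoptions.go hunk further down.

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	// Hypothetical target. The insecure credentials' AuthInfo implements
	// ValidateAuthority by accepting any value (see the insecure.go hunk
	// below); TLS credentials instead verify the override against the
	// server certificate via TLSInfo.ValidateAuthority.
	conn, err := grpc.NewClient("dns:///example.internal:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("NewClient: %v", err)
	}
	defer conn.Close()

	// CallAuthority overrides the :authority pseudo-header for this one RPC;
	// the credentials' AuthorityValidator, if implemented, vets the value.
	_, err = healthpb.NewHealthClient(conn).Check(context.Background(),
		&healthpb.HealthCheckRequest{},
		grpc.CallAuthority("override.example.internal"))
	if err != nil {
		log.Printf("Check: %v", err)
	}
}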
Individual credential implementations control the actual diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go index 4c805c644..93156c0f3 100644 --- a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go +++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -30,7 +30,7 @@ import ( // NewCredentials returns a credentials which disables transport security. // // Note that using this credentials with per-RPC credentials which require -// transport security is incompatible and will cause grpc.Dial() to fail. +// transport security is incompatible and will cause RPCs to fail. func NewCredentials() credentials.TransportCredentials { return insecureTC{} } @@ -71,6 +71,12 @@ func (info) AuthType() string { return "insecure" } +// ValidateAuthority allows any value to be overridden for the :authority +// header. +func (info) ValidateAuthority(string) error { + return nil +} + // insecureBundle implements an insecure bundle. // An insecure bundle provides a thin wrapper around insecureTC to support // the credentials.Bundle interface. diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index bd5fe22b6..8277be7d6 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -22,6 +22,7 @@ import ( "context" "crypto/tls" "crypto/x509" + "errors" "fmt" "net" "net/url" @@ -50,6 +51,21 @@ func (t TLSInfo) AuthType() string { return "tls" } +// ValidateAuthority validates the provided authority being used to override the +// :authority header by verifying it against the peer certificates. It returns a +// non-nil error if the validation fails. +func (t TLSInfo) ValidateAuthority(authority string) error { + var errs []error + for _, cert := range t.State.PeerCertificates { + var err error + if err = cert.VerifyHostname(authority); err == nil { + return nil + } + errs = append(errs, err) + } + return fmt.Errorf("credentials: invalid authority %q: %v", authority, errors.Join(errs...)) +} + // cipherSuiteLookup returns the string version of a TLS cipher suite ID. func cipherSuiteLookup(cipherSuiteID uint16) string { for _, s := range tls.CipherSuites() { @@ -94,14 +110,14 @@ func (c tlsCreds) Info() ProtocolInfo { func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { // use local cfg to avoid clobbering ServerName if using multiple endpoints cfg := credinternal.CloneTLSConfig(c.config) - if cfg.ServerName == "" { - serverName, _, err := net.SplitHostPort(authority) - if err != nil { - // If the authority had no host port or if the authority cannot be parsed, use it as-is. - serverName = authority - } - cfg.ServerName = serverName + + serverName, _, err := net.SplitHostPort(authority) + if err != nil { + // If the authority had no host port or if the authority cannot be parsed, use it as-is. + serverName = authority } + cfg.ServerName = serverName + conn := tls.Client(rawConn, cfg) errChannel := make(chan error, 1) go func() { @@ -243,9 +259,11 @@ func applyDefaults(c *tls.Config) *tls.Config { // certificates to establish the identity of the client need to be included in // the credentials (eg: for mTLS), use NewTLS instead, where a complete // tls.Config can be specified. -// serverNameOverride is for testing only. 
If set to a non empty string, -// it will override the virtual host name of authority (e.g. :authority header -// field) in requests. +// +// serverNameOverride is for testing only. If set to a non empty string, it will +// override the virtual host name of authority (e.g. :authority header field) in +// requests. Users should use grpc.WithAuthority passed to grpc.NewClient to +// override the authority of the client instead. func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) } @@ -255,9 +273,11 @@ func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) Transpor // certificates to establish the identity of the client need to be included in // the credentials (eg: for mTLS), use NewTLS instead, where a complete // tls.Config can be specified. -// serverNameOverride is for testing only. If set to a non empty string, -// it will override the virtual host name of authority (e.g. :authority header -// field) in requests. +// +// serverNameOverride is for testing only. If set to a non empty string, it will +// override the virtual host name of authority (e.g. :authority header field) in +// requests. Users should use grpc.WithAuthority passed to grpc.NewClient to +// override the authority of the client instead. func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { b, err := os.ReadFile(certFile) if err != nil { diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 405a2ffeb..7a5ac2e7c 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -213,6 +213,7 @@ func WithReadBufferSize(s int) DialOption { func WithInitialWindowSize(s int32) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.InitialWindowSize = s + o.copts.StaticWindowSize = true }) } @@ -222,6 +223,26 @@ func WithInitialWindowSize(s int32) DialOption { func WithInitialConnWindowSize(s int32) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.InitialConnWindowSize = s + o.copts.StaticWindowSize = true + }) +} + +// WithStaticStreamWindowSize returns a DialOption which sets the initial +// stream window size to the value provided and disables dynamic flow control. +func WithStaticStreamWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialWindowSize = s + o.copts.StaticWindowSize = true + }) +} + +// WithStaticConnWindowSize returns a DialOption which sets the initial +// connection window size to the value provided and disables dynamic flow +// control. +func WithStaticConnWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialConnWindowSize = s + o.copts.StaticWindowSize = true }) } @@ -360,7 +381,7 @@ func WithReturnConnectionError() DialOption { // // Note that using this DialOption with per-RPC credentials (through // WithCredentialsBundle or WithPerRPCCredentials) which require transport -// security is incompatible and will cause grpc.Dial() to fail. +// security is incompatible and will cause RPCs to fail. // // Deprecated: use WithTransportCredentials and insecure.NewCredentials() // instead. Will be supported throughout 1.x. 
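A minimal sketch of the two static-window DialOptions added above, assuming this vendored grpc-go. The target and window sizes are illustrative, not recommendations.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Pin both HTTP/2 windows and turn off BDP-based dynamic flow control,
	// which WithInitialWindowSize/WithInitialConnWindowSize now also do via
	// the StaticWindowSize flag.
	conn, err := grpc.NewClient("dns:///example.internal:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStaticStreamWindowSize(1<<20), // 1 MiB per stream
		grpc.WithStaticConnWindowSize(4<<20),   // 4 MiB per connection
	)
	if err != nil {
		log.Fatalf("NewClient: %v", err)
	}
	defer conn.Close()
}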
@@ -587,6 +608,8 @@ func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOpt // WithAuthority returns a DialOption that specifies the value to be used as the // :authority pseudo-header and as the server name in authentication handshake. +// This overrides all other ways of setting authority on the channel, but can be +// overridden per-call by using grpc.CallAuthority. func WithAuthority(a string) DialOption { return newFuncDialOption(func(o *dialOptions) { o.authority = a diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 11d0ae142..dadd21e40 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -27,8 +27,10 @@ package encoding import ( "io" + "slices" "strings" + "google.golang.org/grpc/encoding/internal" "google.golang.org/grpc/internal/grpcutil" ) @@ -36,6 +38,24 @@ import ( // It is intended for grpc internal use only. const Identity = "identity" +func init() { + internal.RegisterCompressorForTesting = func(c Compressor) func() { + name := c.Name() + curCompressor, found := registeredCompressor[name] + RegisterCompressor(c) + return func() { + if found { + registeredCompressor[name] = curCompressor + return + } + delete(registeredCompressor, name) + grpcutil.RegisteredCompressorNames = slices.DeleteFunc(grpcutil.RegisteredCompressorNames, func(s string) bool { + return s == name + }) + } + } +} + // Compressor is used for compressing and decompressing when sending or // receiving messages. // diff --git a/vendor/google.golang.org/grpc/encoding/internal/internal.go b/vendor/google.golang.org/grpc/encoding/internal/internal.go new file mode 100644 index 000000000..ee9acb437 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/internal/internal.go @@ -0,0 +1,28 @@ +/* + * + * Copyright 2025 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains code internal to the encoding package. +package internal + +// RegisterCompressorForTesting registers a compressor in the global compressor +// registry. It returns a cleanup function that should be called at the end +// of the test to unregister the compressor. +// +// This prevents compressors registered in one test from appearing in the +// encoding headers of subsequent tests. +var RegisterCompressorForTesting any // func RegisterCompressor(c Compressor) func() diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go index ceec319dd..1ab874c7a 100644 --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -46,9 +46,25 @@ func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) { return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v) } + // Important: if we remove this Size call then we cannot use + // UseCachedSize in MarshalOptions below. 
size := proto.Size(vv) + + // MarshalOptions with UseCachedSize allows reusing the result from the + // previous Size call. This is safe here because: + // + // 1. We just computed the size. + // 2. We assume the message is not being mutated concurrently. + // + // Important: If the proto.Size call above is removed, using UseCachedSize + // becomes unsafe and may lead to incorrect marshaling. + // + // For more details, see the doc of UseCachedSize: + // https://pkg.go.dev/google.golang.org/protobuf/proto#MarshalOptions + marshalOptions := proto.MarshalOptions{UseCachedSize: true} + if mem.IsBelowBufferPoolingThreshold(size) { - buf, err := proto.Marshal(vv) + buf, err := marshalOptions.Marshal(vv) if err != nil { return nil, err } @@ -56,7 +72,7 @@ func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) { } else { pool := mem.DefaultBufferPool() buf := pool.Get(size) - if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil { + if _, err := marshalOptions.MarshalAppend((*buf)[:0], vv); err != nil { pool.Put(buf) return nil, err } diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go index ad75313a1..2b57ba65a 100644 --- a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go +++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go @@ -75,6 +75,7 @@ const ( MetricTypeIntHisto MetricTypeFloatHisto MetricTypeIntGauge + MetricTypeIntUpDownCount ) // Int64CountHandle is a typed handle for a int count metric. This handle @@ -93,6 +94,23 @@ func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels . recorder.RecordInt64Count(h, incr, labels...) } +// Int64UpDownCountHandle is a typed handle for an int up-down counter metric. +// This handle is passed at the recording point in order to know which metric +// to record on. +type Int64UpDownCountHandle MetricDescriptor + +// Descriptor returns the int64 up-down counter handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64UpDownCountHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 up-down counter value on the metrics recorder provided. +// The value 'v' can be positive to increment or negative to decrement. +func (h *Int64UpDownCountHandle) Record(recorder MetricsRecorder, v int64, labels ...string) { + recorder.RecordInt64UpDownCount(h, v, labels...) +} + // Float64CountHandle is a typed handle for a float count metric. This handle is // passed at the recording point in order to know which metric to record on. type Float64CountHandle MetricDescriptor @@ -249,6 +267,21 @@ func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle { return (*Int64GaugeHandle)(descPtr) } +// RegisterInt64UpDownCount registers the metric description onto the global registry. +// It returns a typed handle to use for recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. 
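A hypothetical registration and recording sketch for the new up-down counter handle; the RegisterInt64UpDownCount function it relies on is added immediately below. The metric name, unit, and label are invented for illustration, and registration must happen at init time as the NOTE above requires.

package example

import (
	estats "google.golang.org/grpc/experimental/stats"
)

// Registered via package-level initialization, satisfying the init-time
// requirement. Name, unit, and label are hypothetical.
var activeStreams = estats.RegisterInt64UpDownCount(estats.MetricDescriptor{
	Name:        "example.client.active_streams",
	Description: "Number of streams currently active.",
	Unit:        "{stream}",
	Labels:      []string{"grpc.target"},
	Default:     false,
})

// At a recording point, a component holding a MetricsRecorder increments on
// stream start and decrements on stream end.
func onStreamStart(rec estats.MetricsRecorder, target string) {
	activeStreams.Record(rec, 1, target)
}

func onStreamEnd(rec estats.MetricsRecorder, target string) {
	activeStreams.Record(rec, -1, target)
}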
+func RegisterInt64UpDownCount(descriptor MetricDescriptor) *Int64UpDownCountHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	// Set the specific metric type for the up-down counter
+	descriptor.Type = MetricTypeIntUpDownCount
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Int64UpDownCountHandle)(descPtr)
+}
+
 // snapshotMetricsRegistryForTesting snapshots the global data of the metrics
 // registry. Returns a cleanup function that sets the metrics registry to its
 // original state.
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
index ee1423605..cb57f1a74 100644
--- a/vendor/google.golang.org/grpc/experimental/stats/metrics.go
+++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
@@ -38,6 +38,9 @@ type MetricsRecorder interface {
 	// RecordInt64Gauge records the measurement alongside labels on the int
 	// gauge associated with the provided handle.
 	RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string)
+	// RecordInt64UpDownCount records the measurement alongside labels on the
+	// int up-down counter associated with the provided handle.
+	RecordInt64UpDownCount(handle *Int64UpDownCountHandle, incr int64, labels ...string)
 }
 
 // Metrics is an experimental legacy alias of the now-stable stats.MetricSet.
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
index faa59e418..8f7d9f6bb 100644
--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
+++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
@@ -17,7 +17,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// protoc-gen-go v1.36.5
+// protoc-gen-go v1.36.10
 // protoc v5.27.1
 // source: grpc/health/v1/health.proto
@@ -261,63 +261,29 @@ func (x *HealthListResponse) GetStatuses() map[string]*HealthCheckResponse {
 
 var File_grpc_health_v1_health_proto protoreflect.FileDescriptor
 
-var file_grpc_health_v1_health_proto_rawDesc = string([]byte{
-	0x0a, 0x1b, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x76, 0x31,
-	0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x67,
-	0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x22, 0x2e, 0x0a,
-	0x12, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75,
-	0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01,
-	0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0xb1, 0x01,
-	0x0a, 0x13, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73,
-	0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18,
-	0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61,
-	0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
-	0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69,
-	0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
-	0x22, 0x4f, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75,
-	0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b,
-	0x0a, 0x07, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x01,
0x12, 0x0f, 0x0a, 0x0b, 0x4e, - 0x4f, 0x54, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, - 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x03, 0x22, 0x13, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x4c, 0x69, 0x73, 0x74, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xc4, 0x01, 0x0a, 0x12, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4c, 0x0a, - 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, - 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x1a, 0x60, 0x0a, 0x0d, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x32, 0xfd, 0x01, - 0x0a, 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x50, 0x0a, 0x05, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, - 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x04, 0x4c, 0x69, - 0x73, 0x74, 0x12, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, - 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x4c, 0x69, 0x73, - 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, 0x0a, 0x05, 0x57, 0x61, 0x74, - 0x63, 0x68, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, - 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, - 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x42, 0x70, 0x0a, - 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, - 0x76, 0x31, 0x42, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 
0x68, - 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x76, 0x31, 0xa2, - 0x02, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x56, 0x31, 0xaa, 0x02, - 0x0e, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x56, 0x31, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) +const file_grpc_health_v1_health_proto_rawDesc = "" + + "\n" + + "\x1bgrpc/health/v1/health.proto\x12\x0egrpc.health.v1\".\n" + + "\x12HealthCheckRequest\x12\x18\n" + + "\aservice\x18\x01 \x01(\tR\aservice\"\xb1\x01\n" + + "\x13HealthCheckResponse\x12I\n" + + "\x06status\x18\x01 \x01(\x0e21.grpc.health.v1.HealthCheckResponse.ServingStatusR\x06status\"O\n" + + "\rServingStatus\x12\v\n" + + "\aUNKNOWN\x10\x00\x12\v\n" + + "\aSERVING\x10\x01\x12\x0f\n" + + "\vNOT_SERVING\x10\x02\x12\x13\n" + + "\x0fSERVICE_UNKNOWN\x10\x03\"\x13\n" + + "\x11HealthListRequest\"\xc4\x01\n" + + "\x12HealthListResponse\x12L\n" + + "\bstatuses\x18\x01 \x03(\v20.grpc.health.v1.HealthListResponse.StatusesEntryR\bstatuses\x1a`\n" + + "\rStatusesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x129\n" + + "\x05value\x18\x02 \x01(\v2#.grpc.health.v1.HealthCheckResponseR\x05value:\x028\x012\xfd\x01\n" + + "\x06Health\x12P\n" + + "\x05Check\x12\".grpc.health.v1.HealthCheckRequest\x1a#.grpc.health.v1.HealthCheckResponse\x12M\n" + + "\x04List\x12!.grpc.health.v1.HealthListRequest\x1a\".grpc.health.v1.HealthListResponse\x12R\n" + + "\x05Watch\x12\".grpc.health.v1.HealthCheckRequest\x1a#.grpc.health.v1.HealthCheckResponse0\x01Bp\n" + + "\x11io.grpc.health.v1B\vHealthProtoP\x01Z,google.golang.org/grpc/health/grpc_health_v1\xa2\x02\fGrpcHealthV1\xaa\x02\x0eGrpc.Health.V1b\x06proto3" var ( file_grpc_health_v1_health_proto_rawDescOnce sync.Once diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index 93136610e..f2c01f296 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -188,13 +188,13 @@ type HealthServer interface { type UnimplementedHealthServer struct{} func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") + return nil, status.Error(codes.Unimplemented, "method Check not implemented") } func (UnimplementedHealthServer) List(context.Context, *HealthListRequest) (*HealthListResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method List not implemented") + return nil, status.Error(codes.Unimplemented, "method List not implemented") } func (UnimplementedHealthServer) Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error { - return status.Errorf(codes.Unimplemented, "method Watch not implemented") + return status.Error(codes.Unimplemented, "method Watch not implemented") } func (UnimplementedHealthServer) testEmbeddedByValue() {} diff --git a/vendor/google.golang.org/grpc/health/server.go b/vendor/google.golang.org/grpc/health/server.go index d4b4b7081..d8eebe99d 100644 --- a/vendor/google.golang.org/grpc/health/server.go +++ b/vendor/google.golang.org/grpc/health/server.go @@ -30,6 +30,13 @@ import ( "google.golang.org/grpc/status" ) +const ( + // maxAllowedServices defines the maximum number of resources a List + // operation can return. 
An error is returned if the number of services + // exceeds this limit. + maxAllowedServices = 100 +) + // Server implements `service Health`. type Server struct { healthgrpc.UnimplementedHealthServer @@ -62,6 +69,23 @@ func (s *Server) Check(_ context.Context, in *healthpb.HealthCheckRequest) (*hea return nil, status.Error(codes.NotFound, "unknown service") } +// List implements `service Health`. +func (s *Server) List(_ context.Context, _ *healthpb.HealthListRequest) (*healthpb.HealthListResponse, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + if len(s.statusMap) > maxAllowedServices { + return nil, status.Errorf(codes.ResourceExhausted, "server health list exceeds maximum capacity: %d", maxAllowedServices) + } + + statusMap := make(map[string]*healthpb.HealthCheckResponse, len(s.statusMap)) + for k, v := range s.statusMap { + statusMap[k] = &healthpb.HealthCheckResponse{Status: v} + } + + return &healthpb.HealthListResponse{Statuses: statusMap}, nil +} + // Watch implements `service Health`. func (s *Server) Watch(in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error { service := in.Service diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go index fbc1ca356..ba25b8988 100644 --- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -223,15 +223,7 @@ func (gsb *Balancer) ExitIdle() { // There is no need to protect this read with a mutex, as the write to the // Balancer field happens in SwitchTo, which completes before this can be // called. - if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok { - ei.ExitIdle() - return - } - gsb.mu.Lock() - defer gsb.mu.Unlock() - for sc := range balToUpdate.subconns { - sc.Connect() - } + balToUpdate.ExitIdle() } // updateSubConnState forwards the update to the appropriate child. diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go index 11f91668a..467392b8d 100644 --- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -83,6 +83,7 @@ func (b *Unbounded) Load() { default: } } else if b.closing && !b.closed { + b.closed = true close(b.c) } } diff --git a/vendor/google.golang.org/grpc/internal/channelz/trace.go b/vendor/google.golang.org/grpc/internal/channelz/trace.go index 2bffe4777..3b7ba5966 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/trace.go +++ b/vendor/google.golang.org/grpc/internal/channelz/trace.go @@ -194,7 +194,7 @@ func (r RefChannelType) String() string { // If channelz is not turned ON, this will simply log the event descriptions. func AddTraceEvent(l grpclog.DepthLoggerV2, e Entity, depth int, desc *TraceEvent) { // Log only the trace description associated with the bottom most entity. 
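A client-side sketch of the new health/v1 List RPC, whose server side is implemented in the health/server.go hunk above, assuming this vendored grpc-go. The target is hypothetical.

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	conn, err := grpc.NewClient("dns:///example.internal:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("NewClient: %v", err)
	}
	defer conn.Close()

	// List reports the status of every registered service; the server fails
	// with ResourceExhausted once more than maxAllowedServices (100) are
	// registered.
	resp, err := healthpb.NewHealthClient(conn).List(
		context.Background(), &healthpb.HealthListRequest{})
	if err != nil {
		log.Fatalf("List: %v", err)
	}
	for svc, st := range resp.GetStatuses() {
		log.Printf("%q: %v", svc, st.GetStatus())
	}
}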
- d := fmt.Sprintf("[%s]%s", e, desc.Desc) + d := fmt.Sprintf("[%s] %s", e, desc.Desc) switch desc.Severity { case CtUnknown, CtInfo: l.InfoDepth(depth+1, d) diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go index 9deee7f65..48b22d9cf 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go @@ -20,20 +20,6 @@ import ( "context" ) -// requestInfoKey is a struct to be used as the key to store RequestInfo in a -// context. -type requestInfoKey struct{} - -// NewRequestInfoContext creates a context with ri. -func NewRequestInfoContext(ctx context.Context, ri any) context.Context { - return context.WithValue(ctx, requestInfoKey{}, ri) -} - -// RequestInfoFromContext extracts the RequestInfo from ctx. -func RequestInfoFromContext(ctx context.Context) any { - return ctx.Value(requestInfoKey{}) -} - // clientHandshakeInfoKey is a struct used as the key to store // ClientHandshakeInfo in a context. type clientHandshakeInfoKey struct{} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index cc5713fd9..91f760936 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -26,35 +26,31 @@ import ( ) var ( - // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). + // EnableTXTServiceConfig is set if the DNS resolver should perform TXT + // lookups for service config ("GRPC_ENABLE_TXT_SERVICE_CONFIG" is not + // "false"). + EnableTXTServiceConfig = boolFromEnv("GRPC_ENABLE_TXT_SERVICE_CONFIG", true) + + // TXTErrIgnore is set if TXT errors should be ignored + // ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true) + // RingHashCap indicates the maximum ring size which defaults to 4096 // entries but may be overridden by setting the environment variable // "GRPC_RING_HASH_CAP". This does not override the default bounds // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) - // LeastRequestLB is set if we should support the least_request_experimental - // LB policy, which can be enabled by setting the environment variable - // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true". - LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false) + // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS // handshakes that can be performed. ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) + // EnforceALPNEnabled is set if TLS connections to servers with ALPN disabled // should be rejected. The HTTP/2 protocol requires ALPN to be enabled, this // option is present for backward compatibility. This option may be overridden // by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true" // or "false". EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true) - // XDSFallbackSupport is the env variable that controls whether support for - // xDS fallback is turned on. If this is unset or is false, only the first - // xDS server in the list of server configs will be used. 
- XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", true) - // NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used - // instead of the exiting pickfirst implementation. This can be disabled by - // setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST" - // to "false". - NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", true) // XDSEndpointHashKeyBackwardCompat controls the parsing of the endpoint hash // key from EDS LbEndpoint metadata. Endpoint hash keys can be disabled by @@ -69,6 +65,18 @@ var ( // to gRFC A76. It can be enabled by setting the environment variable // "GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY" to "true". RingHashSetRequestHashKey = boolFromEnv("GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY", false) + + // ALTSHandshakerKeepaliveParams is set if we should add the + // KeepaliveParams when dial the ALTS handshaker service. + ALTSHandshakerKeepaliveParams = boolFromEnv("GRPC_EXPERIMENTAL_ALTS_HANDSHAKER_KEEPALIVE_PARAMS", false) + + // EnableDefaultPortForProxyTarget controls whether the resolver adds a default port 443 + // to a target address that lacks one. This flag only has an effect when all of + // the following conditions are met: + // - A connect proxy is being used. + // - Target resolution is disabled. + // - The DNS resolver is being used. + EnableDefaultPortForProxyTarget = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_DEFAULT_PORT_FOR_PROXY_TARGET", true) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 2eb97f832..7685d08b5 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -63,4 +63,20 @@ var ( // For more details, see: // https://github.com/grpc/proposal/blob/master/A82-xds-system-root-certs.md. XDSSystemRootCertsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_SYSTEM_ROOT_CERTS", false) + + // XDSSPIFFEEnabled controls if SPIFFE Bundle Maps can be used as roots of + // trust. For more details, see: + // https://github.com/grpc/proposal/blob/master/A87-mtls-spiffe-support.md + XDSSPIFFEEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_MTLS_SPIFFE", false) + + // XDSHTTPConnectEnabled is true if gRPC should parse custom Metadata + // configuring use of an HTTP CONNECT proxy via xDS from cluster resources. + // For more details, see: + // https://github.com/grpc/proposal/blob/master/A86-xds-http-connect.md + XDSHTTPConnectEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_HTTP_CONNECT", false) + + // XDSBootstrapCallCredsEnabled controls if call credentials can be used in + // xDS bootstrap configuration via the `call_creds` field. 
For more details, + // see: https://github.com/grpc/proposal/blob/master/A97-xds-jwt-call-creds.md + XDSBootstrapCallCredsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_BOOTSTRAP_CALL_CREDS", false) ) diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go index 8e8e86128..9b6d8a1fa 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -80,25 +80,11 @@ func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure func (cs *CallbackSerializer) run(ctx context.Context) { defer close(cs.done) - // TODO: when Go 1.21 is the oldest supported version, this loop and Close - // can be replaced with: - // - // context.AfterFunc(ctx, cs.callbacks.Close) - for ctx.Err() == nil { - select { - case <-ctx.Done(): - // Do nothing here. Next iteration of the for loop will not happen, - // since ctx.Err() would be non-nil. - case cb := <-cs.callbacks.Get(): - cs.callbacks.Load() - cb.(func(context.Context))(ctx) - } - } - - // Close the buffer to prevent new callbacks from being added. - cs.callbacks.Close() + // Close the buffer when the context is canceled + // to prevent new callbacks from being added. + context.AfterFunc(ctx, cs.callbacks.Close) - // Run all pending callbacks. + // Run all callbacks. for cb := range cs.callbacks.Get() { cs.callbacks.Load() cb.(func(context.Context))(ctx) diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/event.go b/vendor/google.golang.org/grpc/internal/grpcsync/event.go index fbe697c37..d788c2493 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/event.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/event.go @@ -21,28 +21,25 @@ package grpcsync import ( - "sync" "sync/atomic" ) // Event represents a one-time event that may occur in the future. type Event struct { - fired int32 + fired atomic.Bool c chan struct{} - o sync.Once } // Fire causes e to complete. It is safe to call multiple times, and // concurrently. It returns true iff this call to Fire caused the signaling -// channel returned by Done to close. +// channel returned by Done to close. If Fire returns false, it is possible +// the Done channel has not been closed yet. func (e *Event) Fire() bool { - ret := false - e.o.Do(func() { - atomic.StoreInt32(&e.fired, 1) + if e.fired.CompareAndSwap(false, true) { close(e.c) - ret = true - }) - return ret + return true + } + return false } // Done returns a channel that will be closed when Fire is called. @@ -52,7 +49,7 @@ func (e *Event) Done() <-chan struct{} { // HasFired returns true if Fire has been called. func (e *Event) HasFired() bool { - return atomic.LoadInt32(&e.fired) == 1 + return e.fired.Load() } // NewEvent returns a new, ready-to-use Event. diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 2ce012cda..2699223a2 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -182,35 +182,6 @@ var ( // other features, including the CSDS service. NewXDSResolverWithClientForTesting any // func(xdsclient.XDSClient) (resolver.Builder, error) - // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster - // Specifier Plugin for testing purposes, regardless of the XDSRLS environment - // variable. 
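The grpcsync.Event rewrite above drops the sync.Once/int32 pair for a single atomic.Bool whose CompareAndSwap picks the one caller that closes the channel; as the new doc comment warns, a Fire that returns false may return before the winning Fire has actually closed Done. The package is internal, so this standalone sketch only mirrors the pattern:

package main

import (
	"fmt"
	"sync/atomic"
)

// event mirrors the internal grpcsync.Event after this change: one
// CompareAndSwap decides the winner, and only the winner closes the channel.
type event struct {
	fired atomic.Bool
	c     chan struct{}
}

func newEvent() *event { return &event{c: make(chan struct{})} }

func (e *event) fire() bool {
	if e.fired.CompareAndSwap(false, true) {
		close(e.c)
		return true
	}
	return false
}

func main() {
	e := newEvent()
	fmt.Println(e.fire()) // true: this call closed the channel
	fmt.Println(e.fire()) // false: already fired
	<-e.c                 // does not block; the channel is closed
}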
- // - // TODO: Remove this function once the RLS env var is removed. - RegisterRLSClusterSpecifierPluginForTesting func() - - // UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster - // Specifier Plugin for testing purposes. This is needed because there is no way - // to unregister the RLS Cluster Specifier Plugin after registering it solely - // for testing purposes using RegisterRLSClusterSpecifierPluginForTesting(). - // - // TODO: Remove this function once the RLS env var is removed. - UnregisterRLSClusterSpecifierPluginForTesting func() - - // RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing - // purposes, regardless of the RBAC environment variable. - // - // TODO: Remove this function once the RBAC env var is removed. - RegisterRBACHTTPFilterForTesting func() - - // UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for - // testing purposes. This is needed because there is no way to unregister the - // HTTP Filter after registering it solely for testing purposes using - // RegisterRBACHTTPFilterForTesting(). - // - // TODO: Remove this function once the RBAC env var is removed. - UnregisterRBACHTTPFilterForTesting func() - // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions) @@ -266,6 +237,13 @@ var ( TimeAfterFunc = func(d time.Duration, f func()) Timer { return time.AfterFunc(d, f) } + + // NewStreamWaitingForResolver is a test hook that is triggered when a + // new stream blocks while waiting for name resolution. This can be + // used in tests to synchronize resolver updates and avoid race conditions. + // When set, the function will be called before the stream enters + // the blocking state. + NewStreamWaitingForResolver = func() {} ) // HealthChecker defines the signature of the client-side LB channel health diff --git a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go index 20b8fb098..5bfa67b72 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go @@ -22,11 +22,13 @@ package delegatingresolver import ( "fmt" + "net" "net/http" "net/url" "sync" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/proxyattributes" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/internal/transport/networktype" @@ -40,6 +42,8 @@ var ( HTTPSProxyFromEnvironment = http.ProxyFromEnvironment ) +const defaultPort = "443" + // delegatingResolver manages both target URI and proxy address resolution by // delegating these tasks to separate child resolvers. Essentially, it acts as // an intermediary between the gRPC ClientConn and the child resolvers. 
@@ -107,10 +111,18 @@ func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOpti targetResolver: nopResolver{}, } + addr := target.Endpoint() var err error - r.proxyURL, err = proxyURLForTarget(target.Endpoint()) + if target.URL.Scheme == "dns" && !targetResolutionEnabled && envconfig.EnableDefaultPortForProxyTarget { + addr, err = parseTarget(addr) + if err != nil { + return nil, fmt.Errorf("delegating_resolver: invalid target address %q: %v", target.Endpoint(), err) + } + } + + r.proxyURL, err = proxyURLForTarget(addr) if err != nil { - return nil, fmt.Errorf("delegating_resolver: failed to determine proxy URL for target %s: %v", target, err) + return nil, fmt.Errorf("delegating_resolver: failed to determine proxy URL for target %q: %v", target, err) } // proxy is not configured or proxy address excluded using `NO_PROXY` env @@ -132,8 +144,8 @@ func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOpti // bypass the target resolver and store the unresolved target address. if target.URL.Scheme == "dns" && !targetResolutionEnabled { r.targetResolverState = &resolver.State{ - Addresses: []resolver.Address{{Addr: target.Endpoint()}}, - Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: target.Endpoint()}}}}, + Addresses: []resolver.Address{{Addr: addr}}, + Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: addr}}}}, } r.updateTargetResolverState(*r.targetResolverState) return r, nil @@ -202,6 +214,44 @@ func needsProxyResolver(state *resolver.State) bool { return false } +// parseTarget takes a target string and ensures it is a valid "host:port" target. +// +// It does the following: +// 1. If the target already has a port (e.g., "host:port", "[ipv6]:port"), +// it is returned as is. +// 2. If the host part is empty (e.g., ":80"), it defaults to "localhost", +// returning "localhost:80". +// 3. If the target is missing a port (e.g., "host", "ipv6"), the defaultPort +// is added. +// +// An error is returned for empty targets or targets with a trailing colon +// but no port (e.g., "host:"). +func parseTarget(target string) (string, error) { + if target == "" { + return "", fmt.Errorf("missing address") + } + + host, port, err := net.SplitHostPort(target) + if err != nil { + // If SplitHostPort fails, it's likely because the port is missing. + // We append the default port and return the result. + return net.JoinHostPort(target, defaultPort), nil + } + + // If SplitHostPort succeeds, we check for edge cases. + if port == "" { + // A success with an empty port means the target had a trailing colon, + // e.g., "host:", which is an error. + return "", fmt.Errorf("missing port after port-separator colon") + } + if host == "" { + // A success with an empty host means the target was like ":80". + // We default the host to "localhost". + host = "localhost" + } + return net.JoinHostPort(host, port), nil +} + func skipProxy(address resolver.Address) bool { // Avoid proxy when network is not tcp. networkType, ok := networktype.Get(address) diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index ba5c5a95d..ada5251cf 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -132,13 +132,13 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts // DNS address (non-IP). 
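parseTarget above leans entirely on net.SplitHostPort/JoinHostPort for the normalization rules in its doc comment. Since delegatingresolver is internal, here is a standalone mirror of the logic with a few probes (the 443 default comes from the new defaultPort const):

package main

import (
	"fmt"
	"net"
)

// normalize mirrors parseTarget: keep "host:port" as is, default a missing
// port to 443, default an empty host to "localhost", and reject "" and
// trailing-colon targets such as "host:".
func normalize(target string) (string, error) {
	if target == "" {
		return "", fmt.Errorf("missing address")
	}
	host, port, err := net.SplitHostPort(target)
	if err != nil {
		// SplitHostPort failed, so the port is missing; append the default.
		return net.JoinHostPort(target, "443"), nil
	}
	if port == "" {
		// A trailing colon with no port, e.g. "host:".
		return "", fmt.Errorf("missing port after port-separator colon")
	}
	if host == "" {
		host = "localhost"
	}
	return net.JoinHostPort(host, port), nil
}

func main() {
	for _, tgt := range []string{"example.com", "example.com:8080", ":80", "::1", "host:"} {
		got, err := normalize(tgt)
		fmt.Println(tgt, "->", got, err)
	}
}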
ctx, cancel := context.WithCancel(context.Background()) d := &dnsResolver{ - host: host, - port: port, - ctx: ctx, - cancel: cancel, - cc: cc, - rn: make(chan struct{}, 1), - disableServiceConfig: opts.DisableServiceConfig, + host: host, + port: port, + ctx: ctx, + cancel: cancel, + cc: cc, + rn: make(chan struct{}, 1), + enableServiceConfig: envconfig.EnableTXTServiceConfig && !opts.DisableServiceConfig, } d.resolver, err = internal.NewNetResolver(target.URL.Host) @@ -181,8 +181,8 @@ type dnsResolver struct { // finishes, race detector sometimes will warn lookup (READ the lookup // function pointers) inside watcher() goroutine has data race with // replaceNetFunc (WRITE the lookup function pointers). - wg sync.WaitGroup - disableServiceConfig bool + wg sync.WaitGroup + enableServiceConfig bool } // ResolveNow invoke an immediate resolution of the target that this @@ -346,7 +346,7 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { if len(srv) > 0 { state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv}) } - if !d.disableServiceConfig { + if d.enableServiceConfig { state.ServiceConfig = d.lookupTXT(ctx) } return &state, nil diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go index 79044657b..d5f7e4d62 100644 --- a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go +++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go @@ -64,6 +64,16 @@ func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, } } +// RecordInt64UpDownCount records the measurement alongside labels on the int +// count associated with the provided handle. +func (l *MetricsRecorderList) RecordInt64UpDownCount(handle *estats.Int64UpDownCountHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64UpDownCount(handle, incr, labels...) + } +} + // RecordFloat64Count records the measurement alongside labels on the float // count associated with the provided handle. func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) { diff --git a/vendor/google.golang.org/grpc/internal/stats/stats.go b/vendor/google.golang.org/grpc/internal/stats/stats.go new file mode 100644 index 000000000..49019b80d --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/stats/stats.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2025 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package stats + +import ( + "context" + + "google.golang.org/grpc/stats" +) + +type combinedHandler struct { + handlers []stats.Handler +} + +// NewCombinedHandler combines multiple stats.Handlers into a single handler. +// +// It returns nil if no handlers are provided. If only one handler is +// provided, it is returned directly without wrapping. 
+func NewCombinedHandler(handlers ...stats.Handler) stats.Handler { + switch len(handlers) { + case 0: + return nil + case 1: + return handlers[0] + default: + return &combinedHandler{handlers: handlers} + } +} + +func (ch *combinedHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { + for _, h := range ch.handlers { + ctx = h.TagRPC(ctx, info) + } + return ctx +} + +func (ch *combinedHandler) HandleRPC(ctx context.Context, stats stats.RPCStats) { + for _, h := range ch.handlers { + h.HandleRPC(ctx, stats) + } +} + +func (ch *combinedHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context { + for _, h := range ch.handlers { + ctx = h.TagConn(ctx, info) + } + return ctx +} + +func (ch *combinedHandler) HandleConn(ctx context.Context, stats stats.ConnStats) { + for _, h := range ch.handlers { + h.HandleConn(ctx, stats) + } +} diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index 1186f1e9a..aad171cd0 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -236,3 +236,11 @@ func IsRestrictedControlPlaneCode(s *Status) bool { } return false } + +// RawStatusProto returns the internal protobuf message for use by gRPC itself. +func RawStatusProto(s *Status) *spb.Status { + if s == nil { + return nil + } + return s.s +} diff --git a/vendor/google.golang.org/grpc/internal/transport/client_stream.go b/vendor/google.golang.org/grpc/internal/transport/client_stream.go index ccc0e017e..980452519 100644 --- a/vendor/google.golang.org/grpc/internal/transport/client_stream.go +++ b/vendor/google.golang.org/grpc/internal/transport/client_stream.go @@ -29,25 +29,27 @@ import ( // ClientStream implements streaming functionality for a gRPC client. type ClientStream struct { - *Stream // Embed for common stream functionality. + Stream // Embed for common stream functionality. ct *http2Client done chan struct{} // closed at the end of stream to unblock writers. doneFunc func() // invoked at the end of stream. - headerChan chan struct{} // closed to indicate the end of header metadata. - headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. + headerChan chan struct{} // closed to indicate the end of header metadata. + header metadata.MD // the received header metadata + + status *status.Status // the status error received from the server + + // Non-pointer fields are at the end to optimize GC allocations. + // headerValid indicates whether a valid header was received. Only // meaningful after headerChan is closed (always call waitOnHeader() before // reading its value). - headerValid bool - header metadata.MD // the received header metadata - noHeaders bool // set if the client never received headers (set only after the stream is done). - - bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream - unprocessed atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream - - status *status.Status // the status error received from the server + headerValid bool + noHeaders bool // set if the client never received headers (set only after the stream is done). + headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. 
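NewCombinedHandler above lets the transports hold a single stats.Handler instead of looping over a slice on every event. The public option surface is unchanged; repeated grpc.WithStatsHandler options all still fire, as in this sketch where the handler is a stand-in that only logs event types:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/stats"
)

// logHandler is a minimal stats.Handler that logs RPC lifecycle events.
type logHandler struct{ name string }

func (l *logHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { return ctx }
func (l *logHandler) HandleRPC(_ context.Context, s stats.RPCStats) {
	log.Printf("%s: %T", l.name, s)
}
func (l *logHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (l *logHandler) HandleConn(_ context.Context, _ stats.ConnStats)                   {}

func main() {
	// Both handlers keep firing after this change; internally the client
	// transport now stores them as one combined stats.Handler.
	conn, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStatsHandler(&logHandler{name: "metrics"}),
		grpc.WithStatsHandler(&logHandler{name: "tracing"}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}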
+ bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream + unprocessed atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream } // Read reads an n byte message from the input stream. @@ -142,3 +144,11 @@ func (s *ClientStream) TrailersOnly() bool { func (s *ClientStream) Status() *status.Status { return s.status } + +func (s *ClientStream) requestRead(n int) { + s.ct.adjustWindow(s, uint32(n)) +} + +func (s *ClientStream) updateWindow(n int) { + s.ct.updateWindow(s, uint32(n)) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index ef72fbb3a..2dcd1e63b 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -40,6 +40,13 @@ var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { e.SetMaxDynamicTableSizeLimit(v) } +// itemNodePool is used to reduce heap allocations. +var itemNodePool = sync.Pool{ + New: func() any { + return &itemNode{} + }, +} + type itemNode struct { it any next *itemNode @@ -51,7 +58,9 @@ type itemList struct { } func (il *itemList) enqueue(i any) { - n := &itemNode{it: i} + n := itemNodePool.Get().(*itemNode) + n.next = nil + n.it = i if il.tail == nil { il.head, il.tail = n, n return @@ -71,7 +80,9 @@ func (il *itemList) dequeue() any { return nil } i := il.head.it + temp := il.head il.head = il.head.next + itemNodePool.Put(temp) if il.head == nil { il.tail = nil } @@ -146,10 +157,11 @@ type earlyAbortStream struct { func (*earlyAbortStream) isTransportResponseFrame() bool { return false } type dataFrame struct { - streamID uint32 - endStream bool - h []byte - reader mem.Reader + streamID uint32 + endStream bool + h []byte + data mem.BufferSlice + processing bool // onEachWrite is called every time // a part of data is written out. onEachWrite func() @@ -234,6 +246,7 @@ type outStream struct { itl *itemList bytesOutStanding int wq *writeQuota + reader mem.Reader next *outStream prev *outStream @@ -461,7 +474,9 @@ func (c *controlBuffer) finish() { v.onOrphaned(ErrConnClosing) } case *dataFrame: - _ = v.reader.Close() + if !v.processing { + v.data.Free() + } } } @@ -481,6 +496,16 @@ const ( serverSide ) +// maxWriteBufSize is the maximum length (number of elements) the cached +// writeBuf can grow to. The length depends on the number of buffers +// contained within the BufferSlice produced by the codec, which is +// generally small. +// +// If a writeBuf larger than this limit is required, it will be allocated +// and freed after use, rather than being cached. This avoids holding +// on to large amounts of memory. +const maxWriteBufSize = 64 + // Loopy receives frames from the control buffer. // Each frame is handled individually; most of the work done by loopy goes // into handling data frames. Loopy maintains a queue of active streams, and each @@ -515,6 +540,8 @@ type loopyWriter struct { // Side-specific handlers ssGoAwayHandler func(*goAway) (bool, error) + + writeBuf [][]byte // cached slice to avoid heap allocations for calls to mem.Reader.Peek. 
} func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter { @@ -790,10 +817,13 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { // a RST_STREAM before stream initialization thus the stream might // not be established yet. delete(l.estdStreams, c.streamID) + str.reader.Close() str.deleteSelf() for head := str.itl.dequeueAll(); head != nil; head = head.next { if df, ok := head.it.(*dataFrame); ok { - _ = df.reader.Close() + if !df.processing { + df.data.Free() + } } } } @@ -928,7 +958,13 @@ func (l *loopyWriter) processData() (bool, error) { if str == nil { return true, nil } + reader := &str.reader dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. + if !dataItem.processing { + dataItem.processing = true + reader.Reset(dataItem.data) + dataItem.data.Free() + } // A data item is represented by a dataFrame, since it later translates into // multiple HTTP2 data frames. // Every dataFrame has two buffers; h that keeps grpc-message header and data @@ -936,13 +972,13 @@ func (l *loopyWriter) processData() (bool, error) { // from data is copied to h to make as big as the maximum possible HTTP2 frame // size. - if len(dataItem.h) == 0 && dataItem.reader.Remaining() == 0 { // Empty data frame + if len(dataItem.h) == 0 && reader.Remaining() == 0 { // Empty data frame // Client sends out empty data frame with endStream = true - if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { + if err := l.framer.writeData(dataItem.streamID, dataItem.endStream, nil); err != nil { return false, err } str.itl.dequeue() // remove the empty data item from stream - _ = dataItem.reader.Close() + reader.Close() if str.itl.isEmpty() { str.state = empty } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. @@ -971,29 +1007,24 @@ func (l *loopyWriter) processData() (bool, error) { } // Compute how much of the header and data we can send within quota and max frame length hSize := min(maxSize, len(dataItem.h)) - dSize := min(maxSize-hSize, dataItem.reader.Remaining()) - remainingBytes := len(dataItem.h) + dataItem.reader.Remaining() - hSize - dSize + dSize := min(maxSize-hSize, reader.Remaining()) + remainingBytes := len(dataItem.h) + reader.Remaining() - hSize - dSize size := hSize + dSize - var buf *[]byte - - if hSize != 0 && dSize == 0 { - buf = &dataItem.h - } else { - // Note: this is only necessary because the http2.Framer does not support - // partially writing a frame, so the sequence must be materialized into a buffer. - // TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed. - pool := l.bufferPool - if pool == nil { - // Note that this is only supposed to be nil in tests. Otherwise, stream is - // always initialized with a BufferPool. - pool = mem.DefaultBufferPool() + l.writeBuf = l.writeBuf[:0] + if hSize > 0 { + l.writeBuf = append(l.writeBuf, dataItem.h[:hSize]) + } + if dSize > 0 { + var err error + l.writeBuf, err = reader.Peek(dSize, l.writeBuf) + if err != nil { + // This must never happen since the reader must have at least dSize + // bytes. + // Log an error to fail tests. 
+ l.logger.Errorf("unexpected error while reading Data frame payload: %v", err) + return false, err } - buf = pool.Get(size) - defer pool.Put(buf) - - copy((*buf)[:hSize], dataItem.h) - _, _ = dataItem.reader.Read((*buf)[hSize:]) } // Now that outgoing flow controls are checked we can replenish str's write quota @@ -1006,7 +1037,14 @@ func (l *loopyWriter) processData() (bool, error) { if dataItem.onEachWrite != nil { dataItem.onEachWrite() } - if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil { + err := l.framer.writeData(dataItem.streamID, endStream, l.writeBuf) + reader.Discard(dSize) + if cap(l.writeBuf) > maxWriteBufSize { + l.writeBuf = nil + } else { + clear(l.writeBuf) + } + if err != nil { return false, err } str.bytesOutStanding += size @@ -1014,7 +1052,7 @@ func (l *loopyWriter) processData() (bool, error) { dataItem.h = dataItem.h[hSize:] if remainingBytes == 0 { // All the data from that message was written out. - _ = dataItem.reader.Close() + reader.Close() str.itl.dequeue() } if str.itl.isEmpty() { diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go index dfc0f224e..7cfbc9637 100644 --- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go +++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go @@ -28,7 +28,7 @@ import ( // writeQuota is a soft limit on the amount of data a stream can // schedule before some of it is written out. type writeQuota struct { - quota int32 + _ noCopy // get waits on read from when quota goes less than or equal to zero. // replenish writes on it when quota goes positive again. ch chan struct{} @@ -38,16 +38,17 @@ type writeQuota struct { // It is implemented as a field so that it can be updated // by tests. replenish func(n int) + quota int32 } -func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota { - w := &writeQuota{ - quota: sz, - ch: make(chan struct{}, 1), - done: done, - } +// init allows a writeQuota to be initialized in-place, which is useful for +// resetting a buffer or for avoiding a heap allocation when the buffer is +// embedded in another struct. +func (w *writeQuota) init(sz int32, done <-chan struct{}) { + w.quota = sz + w.ch = make(chan struct{}, 1) + w.done = done w.replenish = w.realReplenish - return w } func (w *writeQuota) get(sz int32) error { @@ -67,9 +68,9 @@ func (w *writeQuota) get(sz int32) error { func (w *writeQuota) realReplenish(n int) { sz := int32(n) - a := atomic.AddInt32(&w.quota, sz) - b := a - sz - if b <= 0 && a > 0 { + newQuota := atomic.AddInt32(&w.quota, sz) + previousQuota := newQuota - sz + if previousQuota <= 0 && newQuota > 0 { select { case w.ch <- struct{}{}: default: diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 3dea23573..7ab3422b8 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -50,7 +50,7 @@ import ( // NewServerHandlerTransport returns a ServerTransport handling gRPC from // inside an http.Handler, or writes an HTTP error to w and returns an error. // It requires that the http Server supports HTTP/2. 
-func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) { +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) { if r.Method != http.MethodPost { w.Header().Set("Allow", http.MethodPost) msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) @@ -170,7 +170,7 @@ type serverHandlerTransport struct { // TODO make sure this is consistent across handler_server and http2_server contentSubtype string - stats []stats.Handler + stats stats.Handler logger *grpclog.PrefixLogger bufferPool mem.BufferPool @@ -274,14 +274,14 @@ func (ht *serverHandlerTransport) writeStatus(s *ServerStream, st *status.Status } }) - if err == nil { // transport has not been closed + if err == nil && ht.stats != nil { // transport has not been closed // Note: The trailer fields are compressed with hpack after this call returns. // No WireLength field is set here. - for _, sh := range ht.stats { - sh.HandleRPC(s.Context(), &stats.OutTrailer{ - Trailer: s.trailer.Copy(), - }) - } + s.hdrMu.Lock() + ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + Trailer: s.trailer.Copy(), + }) + s.hdrMu.Unlock() } ht.Close(errors.New("finished writing status")) return err @@ -372,19 +372,23 @@ func (ht *serverHandlerTransport) writeHeader(s *ServerStream, md metadata.MD) e ht.rw.(http.Flusher).Flush() }) - if err == nil { - for _, sh := range ht.stats { - // Note: The header fields are compressed with hpack after this call returns. - // No WireLength field is set here. - sh.HandleRPC(s.Context(), &stats.OutHeader{ - Header: md.Copy(), - Compression: s.sendCompress, - }) - } + if err == nil && ht.stats != nil { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + ht.stats.HandleRPC(s.Context(), &stats.OutHeader{ + Header: md.Copy(), + Compression: s.sendCompress, + }) } return err } +func (ht *serverHandlerTransport) adjustWindow(*ServerStream, uint32) { +} + +func (ht *serverHandlerTransport) updateWindow(*ServerStream, uint32) { +} + func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*ServerStream)) { // With this transport type there will be exactly 1 stream: this HTTP request. var cancel context.CancelFunc @@ -409,11 +413,9 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream ctx = metadata.NewIncomingContext(ctx, ht.headerMD) req := ht.req s := &ServerStream{ - Stream: &Stream{ + Stream: Stream{ id: 0, // irrelevant ctx: ctx, - requestRead: func(int) {}, - buf: newRecvBuffer(), method: req.URL.Path, recvCompress: req.Header.Get("grpc-encoding"), contentSubtype: ht.contentSubtype, @@ -422,9 +424,11 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream st: ht, headerWireLength: 0, // won't have access to header wire length until golang/go#18997. } - s.trReader = &transportReader{ - reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf}, - windowHandler: func(int) {}, + s.Stream.buf.init() + s.readRequester = s + s.trReader = transportReader{ + reader: recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: &s.buf}, + windowHandler: s, } // readerDone is closed when the Body.Read-ing goroutine exits. 
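NewServerHandlerTransport is reached through the public (*grpc.Server).ServeHTTP path, which serves gRPC from inside a plain http.Handler and, per its comment, requires an HTTP/2-capable server. A hedged sketch using h2c so it runs without TLS (the route and address are illustrative):

package main

import (
	"log"
	"net/http"
	"strings"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/h2c"
	"google.golang.org/grpc"
)

func main() {
	gs := grpc.NewServer()
	mux := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// gRPC-over-HTTP/2 requests are handed to the grpc server, which
		// builds a serverHandlerTransport per request; everything else
		// remains ordinary HTTP.
		if r.ProtoMajor == 2 && strings.HasPrefix(r.Header.Get("Content-Type"), "application/grpc") {
			gs.ServeHTTP(w, r)
			return
		}
		http.NotFound(w, r)
	})
	log.Fatal(http.ListenAndServe("localhost:8080", h2c.NewHandler(mux, &http2.Server{})))
}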
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 171e690a3..65b4ab243 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -44,6 +44,7 @@ import ( "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" "google.golang.org/grpc/internal/proxyattributes" + istats "google.golang.org/grpc/internal/stats" istatus "google.golang.org/grpc/internal/status" isyscall "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" @@ -105,7 +106,7 @@ type http2Client struct { kp keepalive.ClientParameters keepaliveEnabled bool - statsHandlers []stats.Handler + statsHandler stats.Handler initialWindowSize int32 @@ -309,11 +310,9 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts scheme = "https" } } - dynamicWindow := true icwz := int32(initialWindowSize) if opts.InitialConnWindowSize >= defaultWindowSize { icwz = opts.InitialConnWindowSize - dynamicWindow = false } writeBufSize := opts.WriteBufferSize readBufSize := opts.ReadBufferSize @@ -337,14 +336,14 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts writerDone: make(chan struct{}), goAway: make(chan struct{}), keepaliveDone: make(chan struct{}), - framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), + framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize, opts.BufferPool), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, activeStreams: make(map[uint32]*ClientStream), isSecure: isSecure, perRPCCreds: perRPCCreds, kp: kp, - statsHandlers: opts.StatsHandlers, + statsHandler: istats.NewCombinedHandler(opts.StatsHandlers...), initialWindowSize: initialWindowSize, nextID: 1, maxConcurrentStreams: defaultMaxStreamsClient, @@ -381,23 +380,21 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts t.controlBuf = newControlBuffer(t.ctxDone) if opts.InitialWindowSize >= defaultWindowSize { t.initialWindowSize = opts.InitialWindowSize - dynamicWindow = false } - if dynamicWindow { + if !opts.StaticWindowSize { t.bdpEst = &bdpEstimator{ bdp: initialWindowSize, updateFlowControl: t.updateFlowControl, } } - for _, sh := range t.statsHandlers { - t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ + if t.statsHandler != nil { + t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, }) - connBegin := &stats.ConnBegin{ + t.statsHandler.HandleConn(t.ctx, &stats.ConnBegin{ Client: true, - } - sh.HandleConn(t.ctx, connBegin) + }) } if t.keepaliveEnabled { t.kpDormancyCond = sync.NewCond(&t.mu) @@ -484,10 +481,9 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientStream { // TODO(zhaoq): Handle uint32 overflow of Stream.id. 
s := &ClientStream{ - Stream: &Stream{ + Stream: Stream{ method: callHdr.Method, sendCompress: callHdr.SendCompress, - buf: newRecvBuffer(), contentSubtype: callHdr.ContentSubtype, }, ct: t, @@ -495,26 +491,21 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientSt headerChan: make(chan struct{}), doneFunc: callHdr.DoneFunc, } - s.wq = newWriteQuota(defaultWriteQuota, s.done) - s.requestRead = func(n int) { - t.adjustWindow(s, uint32(n)) - } + s.Stream.buf.init() + s.Stream.wq.init(defaultWriteQuota, s.done) + s.readRequester = s // The client side stream context should have exactly the same life cycle with the user provided context. // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done. // So we use the original context here instead of creating a copy. s.ctx = ctx - s.trReader = &transportReader{ - reader: &recvBufferReader{ - ctx: s.ctx, - ctxDone: s.ctx.Done(), - recv: s.buf, - closeStream: func(err error) { - s.Close(err) - }, - }, - windowHandler: func(n int) { - t.updateWindow(s, uint32(n)) + s.trReader = transportReader{ + reader: recvBufferReader{ + ctx: s.ctx, + ctxDone: s.ctx.Done(), + recv: &s.buf, + clientStream: s, }, + windowHandler: s, } return s } @@ -545,7 +536,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) Method: callHdr.Method, AuthInfo: t.authInfo, } - ctxWithRequestInfo := icredentials.NewRequestInfoContext(ctx, ri) + ctxWithRequestInfo := credentials.NewContextWithRequestInfo(ctx, ri) authData, err := t.getTrAuthData(ctxWithRequestInfo, aud) if err != nil { return nil, err @@ -559,6 +550,19 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) // Make the slice of certain predictable size to reduce allocations made by append. hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te hfLen += len(authData) + len(callAuthData) + registeredCompressors := t.registeredCompressors + if callHdr.PreviousAttempts > 0 { + hfLen++ + } + if callHdr.SendCompress != "" { + hfLen++ + } + if registeredCompressors != "" { + hfLen++ + } + if _, ok := ctx.Deadline(); ok { + hfLen++ + } headerFields := make([]hpack.HeaderField, 0, hfLen) headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"}) headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme}) @@ -571,7 +575,6 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)}) } - registeredCompressors := t.registeredCompressors if callHdr.SendCompress != "" { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) // Include the outgoing compressor name when compressor is not registered @@ -592,6 +595,9 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) // Send out timeout regardless its value. The server can detect timeout context by itself. // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire. 
timeout := time.Until(dl) + if timeout <= 0 { + return nil, status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error()) + } headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: grpcutil.EncodeDuration(timeout)}) } for k, v := range authData { @@ -749,6 +755,25 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS callHdr = &newCallHdr } + // The authority specified via the `CallAuthority` CallOption takes the + // highest precedence when determining the `:authority` header. It overrides + // any value present in the Host field of CallHdr. Before applying this + // override, the authority string is validated. If the credentials do not + // implement the AuthorityValidator interface, or if validation fails, the + // RPC is failed with a status code of `UNAVAILABLE`. + if callHdr.Authority != "" { + auth, ok := t.authInfo.(credentials.AuthorityValidator) + if !ok { + return nil, &NewStreamError{Err: status.Errorf(codes.Unavailable, "credentials type %q does not implement the AuthorityValidator interface, but authority override specified with CallAuthority call option", t.authInfo.AuthType())} + } + if err := auth.ValidateAuthority(callHdr.Authority); err != nil { + return nil, &NewStreamError{Err: status.Errorf(codes.Unavailable, "failed to validate authority %q : %v", callHdr.Authority, err)} + } + newCallHdr := *callHdr + newCallHdr.Host = callHdr.Authority + callHdr = &newCallHdr + } + headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} @@ -792,7 +817,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS return nil }, onOrphaned: cleanup, - wq: s.wq, + wq: &s.wq, } firstTry := true var ch chan struct{} @@ -823,7 +848,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS transportDrainRequired = t.nextID > MaxStreamID s.id = hdr.streamID - s.fc = &inFlow{limit: uint32(t.initialWindowSize)} + s.fc = inFlow{limit: uint32(t.initialWindowSize)} t.activeStreams[s.id] = s t.mu.Unlock() @@ -874,27 +899,23 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true} } } - if len(t.statsHandlers) != 0 { + if t.statsHandler != nil { header, ok := metadata.FromOutgoingContext(ctx) if ok { header.Set("user-agent", t.userAgent) } else { header = metadata.Pairs("user-agent", t.userAgent) } - for _, sh := range t.statsHandlers { - // Note: The header fields are compressed with hpack after this call returns. - // No WireLength field is set here. - // Note: Creating a new stats object to prevent pollution. - outHeader := &stats.OutHeader{ - Client: true, - FullMethod: callHdr.Method, - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - Compression: callHdr.SendCompress, - Header: header, - } - sh.HandleRPC(s.ctx, outHeader) - } + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + t.statsHandler.HandleRPC(s.ctx, &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + Header: header, + }) } if transportDrainRequired { if t.logger.V(logLevel) { @@ -971,6 +992,9 @@ func (t *http2Client) closeStream(s *ClientStream, err error, rst bool, rstCode // accessed anymore. 
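The NewStream hunk above wires the grpc.CallAuthority call option into the transport: the override is validated through credentials.AuthorityValidator before it replaces the Host field, and the RPC fails with codes.Unavailable if the channel credentials cannot validate it. A sketch of the caller side, with a hypothetical authority value and the health client standing in for any generated stub:

package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	conn, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// The override only succeeds when the transport's AuthInfo implements
	// credentials.AuthorityValidator and accepts the value; otherwise the
	// RPC fails with codes.Unavailable per the change above.
	_, err = healthpb.NewHealthClient(conn).Check(context.Background(),
		&healthpb.HealthCheckRequest{},
		grpc.CallAuthority("api.internal.example"))
	fmt.Println(err)
}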
func (t *http2Client) Close(err error) { t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10)) + // For background on the deadline value chosen here, see + // https://github.com/grpc/grpc-go/issues/8425#issuecomment-3057938248 . + t.conn.SetReadDeadline(time.Now().Add(time.Second)) t.mu.Lock() // Make sure we only close once. if t.state == closing { @@ -1032,11 +1056,10 @@ func (t *http2Client) Close(err error) { for _, s := range streams { t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false) } - for _, sh := range t.statsHandlers { - connEnd := &stats.ConnEnd{ + if t.statsHandler != nil { + t.statsHandler.HandleConn(t.ctx, &stats.ConnEnd{ Client: true, - } - sh.HandleConn(t.ctx, connEnd) + }) } } @@ -1069,32 +1092,29 @@ func (t *http2Client) GracefulClose() { // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. func (t *http2Client) write(s *ClientStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error { - reader := data.Reader() - if opts.Last { // If it's the last message, update stream state. if !s.compareAndSwapState(streamActive, streamWriteDone) { - _ = reader.Close() return errStreamDone } } else if s.getState() != streamActive { - _ = reader.Close() return errStreamDone } df := &dataFrame{ streamID: s.id, endStream: opts.Last, h: hdr, - reader: reader, + data: data, } - if hdr != nil || df.reader.Remaining() != 0 { // If it's not an empty data frame, check quota. - if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { - _ = reader.Close() + dataLen := data.Len() + if hdr != nil || dataLen != 0 { // If it's not an empty data frame, check quota. + if err := s.wq.get(int32(len(hdr) + dataLen)); err != nil { return err } } + data.Ref() if err := t.controlBuf.put(df); err != nil { - _ = reader.Close() + data.Free() return err } t.incrMsgSent() @@ -1150,7 +1170,7 @@ func (t *http2Client) updateFlowControl(n uint32) { }) } -func (t *http2Client) handleData(f *http2.DataFrame) { +func (t *http2Client) handleData(f *parsedDataFrame) { size := f.Header().Length var sendBDPPing bool if t.bdpEst != nil { @@ -1194,22 +1214,15 @@ func (t *http2Client) handleData(f *http2.DataFrame) { t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false) return } + dataLen := f.data.Len() if f.Header().Flags.Has(http2.FlagDataPadded) { - if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + if w := s.fc.onRead(size - uint32(dataLen)); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) } } - // TODO(bradfitz, zhaoq): A copy is required here because there is no - // guarantee f.Data() is consumed before the arrival of next frame. - // Can this copy be eliminated? - if len(f.Data()) > 0 { - pool := t.bufferPool - if pool == nil { - // Note that this is only supposed to be nil in tests. Otherwise, stream is - // always initialized with a BufferPool. - pool = mem.DefaultBufferPool() - } - s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) + if dataLen > 0 { + f.data.Ref() + s.write(recvMsg{buffer: f.data}) } } // The server has closed the stream without sending trailers. 
Record that @@ -1449,17 +1462,14 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { contentTypeErr = "malformed header: missing HTTP content-type" grpcMessage string recvCompress string - httpStatusCode *int httpStatusErr string - rawStatusCode = codes.Unknown + // the code from the grpc-status header, if present + grpcStatusCode = codes.Unknown // headerError is set if an error is encountered while parsing the headers headerError string + httpStatus string ) - if initialHeader { - httpStatusErr = "malformed header: missing HTTP status" - } - for _, hf := range frame.Fields { switch hf.Name { case "content-type": @@ -1479,31 +1489,11 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) return } - rawStatusCode = codes.Code(uint32(code)) + grpcStatusCode = codes.Code(uint32(code)) case "grpc-message": grpcMessage = decodeGrpcMessage(hf.Value) case ":status": - if hf.Value == "200" { - httpStatusErr = "" - statusCode := 200 - httpStatusCode = &statusCode - break - } - - c, err := strconv.ParseInt(hf.Value, 10, 32) - if err != nil { - se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err)) - t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) - return - } - statusCode := int(c) - httpStatusCode = &statusCode - - httpStatusErr = fmt.Sprintf( - "unexpected HTTP status code received from server: %d (%s)", - statusCode, - http.StatusText(statusCode), - ) + httpStatus = hf.Value default: if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { break @@ -1518,25 +1508,52 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { } } - if !isGRPC || httpStatusErr != "" { - var code = codes.Internal // when header does not include HTTP status, return INTERNAL - - if httpStatusCode != nil { + // If a non-gRPC response is received, then evaluate the HTTP status to + // process the response and close the stream. + // In case http status doesn't provide any error information (status : 200), + // then evalute response code to be Unknown. + if !isGRPC { + var grpcErrorCode = codes.Internal + if httpStatus == "" { + httpStatusErr = "malformed header: missing HTTP status" + } else { + // Parse the status codes (e.g. "200", 404"). + statusCode, err := strconv.Atoi(httpStatus) + if err != nil { + se := status.New(grpcErrorCode, fmt.Sprintf("transport: malformed http-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + if statusCode >= 100 && statusCode < 200 { + if endStream { + se := status.New(codes.Internal, fmt.Sprintf( + "protocol error: informational header with status code %d must not have END_STREAM set", statusCode)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + } + // In case of informational headers, return. + return + } + httpStatusErr = fmt.Sprintf( + "unexpected HTTP status code received from server: %d (%s)", + statusCode, + http.StatusText(statusCode), + ) var ok bool - code, ok = HTTPStatusConvTab[*httpStatusCode] + grpcErrorCode, ok = HTTPStatusConvTab[statusCode] if !ok { - code = codes.Unknown + grpcErrorCode = codes.Unknown } } var errs []string if httpStatusErr != "" { errs = append(errs, httpStatusErr) } + if contentTypeErr != "" { errs = append(errs, contentTypeErr) } - // Verify the HTTP response is a 200. 
- se := status.New(code, strings.Join(errs, "; ")) + + se := status.New(grpcErrorCode, strings.Join(errs, "; ")) t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) return } @@ -1567,22 +1584,20 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { } } - for _, sh := range t.statsHandlers { + if t.statsHandler != nil { if !endStream { - inHeader := &stats.InHeader{ + t.statsHandler.HandleRPC(s.ctx, &stats.InHeader{ Client: true, WireLength: int(frame.Header().Length), Header: metadata.MD(mdata).Copy(), Compression: s.recvCompress, - } - sh.HandleRPC(s.ctx, inHeader) + }) } else { - inTrailer := &stats.InTrailer{ + t.statsHandler.HandleRPC(s.ctx, &stats.InTrailer{ Client: true, WireLength: int(frame.Header().Length), Trailer: metadata.MD(mdata).Copy(), - } - sh.HandleRPC(s.ctx, inTrailer) + }) } } @@ -1590,7 +1605,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader]) + status := istatus.NewWithProto(grpcStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader]) // If client received END_STREAM from server while stream was still active, // send RST_STREAM. @@ -1637,7 +1652,7 @@ func (t *http2Client) reader(errCh chan<- error) { // loop to keep reading incoming messages on this transport. for { t.controlBuf.throttle() - frame, err := t.framer.fr.ReadFrame() + frame, err := t.framer.readFrame() if t.keepaliveEnabled { atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) } @@ -1652,7 +1667,7 @@ func (t *http2Client) reader(errCh chan<- error) { if s != nil { // use error detail to provide better err message code := http2ErrConvTab[se.Code] - errorDetail := t.framer.fr.ErrorDetail() + errorDetail := t.framer.errorDetail() var msg string if errorDetail != nil { msg = errorDetail.Error() @@ -1670,8 +1685,9 @@ func (t *http2Client) reader(errCh chan<- error) { switch frame := frame.(type) { case *http2.MetaHeadersFrame: t.operateHeaders(frame) - case *http2.DataFrame: + case *parsedDataFrame: t.handleData(frame) + frame.data.Free() case *http2.RSTStreamFrame: t.handleRSTStream(frame) case *http2.SettingsFrame: diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 7e53eb173..6f78a6b0c 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -35,13 +35,15 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/protobuf/proto" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/pretty" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/mem" - "google.golang.org/protobuf/proto" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -85,7 +87,7 @@ type http2Server struct { // updates, reset streams, and various settings) to the controller. controlBuf *controlBuffer fc *trInFlow - stats []stats.Handler + stats stats.Handler // Keepalive and max-age parameters for the server. kp keepalive.ServerParameters // Keepalive enforcement policy. 
@@ -131,6 +133,10 @@ type http2Server struct { maxStreamID uint32 // max stream ID ever seen logger *grpclog.PrefixLogger + // setResetPingStrikes is stored as a closure instead of making this a + // method on http2Server to avoid a heap allocation when converting a method + // to a closure for passing to frames objects. + setResetPingStrikes func() } // NewServerTransport creates a http2 transport with conn and configuration @@ -163,7 +169,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if config.MaxHeaderListSize != nil { maxHeaderListSize = *config.MaxHeaderListSize } - framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize) + framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize, config.BufferPool) // Send initial settings as connection preface to client. isettings := []http2.Setting{{ ID: http2.SettingMaxFrameSize, @@ -175,16 +181,13 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, Val: config.MaxStreams, }) } - dynamicWindow := true iwz := int32(initialWindowSize) if config.InitialWindowSize >= defaultWindowSize { iwz = config.InitialWindowSize - dynamicWindow = false } icwz := int32(initialWindowSize) if config.InitialConnWindowSize >= defaultWindowSize { icwz = config.InitialConnWindowSize - dynamicWindow = false } if iwz != defaultWindowSize { isettings = append(isettings, http2.Setting{ @@ -258,13 +261,16 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, activeStreams: make(map[uint32]*ServerStream), - stats: config.StatsHandlers, + stats: config.StatsHandler, kp: kp, idle: time.Now(), kep: kep, initialWindowSize: iwz, bufferPool: config.BufferPool, } + t.setResetPingStrikes = func() { + atomic.StoreUint32(&t.resetPingStrikes, 1) + } var czSecurity credentials.ChannelzSecurityValue if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok { czSecurity = au.GetSecurityValue() @@ -284,7 +290,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.logger = prefixLoggerForServerTransport(t) t.controlBuf = newControlBuffer(t.done) - if dynamicWindow { + if !config.StaticWindowSize { t.bdpEst = &bdpEstimator{ bdp: initialWindowSize, updateFlowControl: t.updateFlowControl, @@ -385,16 +391,15 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade } t.maxStreamID = streamID - buf := newRecvBuffer() s := &ServerStream{ - Stream: &Stream{ - id: streamID, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, + Stream: Stream{ + id: streamID, + fc: inFlow{limit: uint32(t.initialWindowSize)}, }, st: t, headerWireLength: int(frame.Header().Length), } + s.Stream.buf.init() var ( // if false, content-type was missing or invalid isGRPC = false @@ -595,10 +600,25 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade return nil } } + + if s.ctx.Err() != nil { + t.mu.Unlock() + // Early abort in case the timeout was zero or so low it already fired. + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusOK, + streamID: s.id, + contentSubtype: s.contentSubtype, + status: status.New(codes.DeadlineExceeded, context.DeadlineExceeded.Error()), + rst: !frame.StreamEnded(), + }) + return nil + } + t.activeStreams[streamID] = s if len(t.activeStreams) == 1 { t.idle = time.Time{} } + // Start a timer to close the stream on reaching the deadline. 
if timeoutSet { // We need to wait for s.cancel to be updated before calling @@ -620,25 +640,21 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade t.channelz.SocketMetrics.StreamsStarted.Add(1) t.channelz.SocketMetrics.LastRemoteStreamCreatedTimestamp.Store(time.Now().UnixNano()) } - s.requestRead = func(n int) { - t.adjustWindow(s, uint32(n)) - } + s.readRequester = s s.ctxDone = s.ctx.Done() - s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) - s.trReader = &transportReader{ - reader: &recvBufferReader{ + s.Stream.wq.init(defaultWriteQuota, s.ctxDone) + s.trReader = transportReader{ + reader: recvBufferReader{ ctx: s.ctx, ctxDone: s.ctxDone, - recv: s.buf, - }, - windowHandler: func(n int) { - t.updateWindow(s, uint32(n)) + recv: &s.buf, }, + windowHandler: s, } // Register the stream with loopy. t.controlBuf.put(®isterStream{ streamID: s.id, - wq: s.wq, + wq: &s.wq, }) handle(s) return nil @@ -654,7 +670,7 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStre }() for { t.controlBuf.throttle() - frame, err := t.framer.fr.ReadFrame() + frame, err := t.framer.readFrame() atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) if err != nil { if se, ok := err.(http2.StreamError); ok { @@ -691,8 +707,9 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStre }) continue } - case *http2.DataFrame: + case *parsedDataFrame: t.handleData(frame) + frame.data.Free() case *http2.RSTStreamFrame: t.handleRSTStream(frame) case *http2.SettingsFrame: @@ -772,7 +789,7 @@ func (t *http2Server) updateFlowControl(n uint32) { } -func (t *http2Server) handleData(f *http2.DataFrame) { +func (t *http2Server) handleData(f *parsedDataFrame) { size := f.Header().Length var sendBDPPing bool if t.bdpEst != nil { @@ -817,22 +834,15 @@ func (t *http2Server) handleData(f *http2.DataFrame) { t.closeStream(s, true, http2.ErrCodeFlowControl, false) return } + dataLen := f.data.Len() if f.Header().Flags.Has(http2.FlagDataPadded) { - if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + if w := s.fc.onRead(size - uint32(dataLen)); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) } } - // TODO(bradfitz, zhaoq): A copy is required here because there is no - // guarantee f.Data() is consumed before the arrival of next frame. - // Can this copy be eliminated? - if len(f.Data()) > 0 { - pool := t.bufferPool - if pool == nil { - // Note that this is only supposed to be nil in tests. Otherwise, stream is - // always initialized with a BufferPool. - pool = mem.DefaultBufferPool() - } - s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) + if dataLen > 0 { + f.data.Ref() + s.write(recvMsg{buffer: f.data}) } } if f.StreamEnded() { @@ -1015,10 +1025,6 @@ func (t *http2Server) writeHeader(s *ServerStream, md metadata.MD) error { return nil } -func (t *http2Server) setResetPingStrikes() { - atomic.StoreUint32(&t.resetPingStrikes, 1) -} - func (t *http2Server) writeHeaderLocked(s *ServerStream) error { // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. @@ -1043,19 +1049,18 @@ func (t *http2Server) writeHeaderLocked(s *ServerStream) error { t.closeStream(s, true, http2.ErrCodeInternal, false) return ErrHeaderListSizeLimitViolation } - for _, sh := range t.stats { + if t.stats != nil { // Note: Headers are compressed with hpack after this call returns. // No WireLength field is set here. 
- outHeader := &stats.OutHeader{ + t.stats.HandleRPC(s.Context(), &stats.OutHeader{ Header: s.header.Copy(), Compression: s.sendCompress, - } - sh.HandleRPC(s.Context(), outHeader) + }) } return nil } -// WriteStatus sends stream status to the client and terminates the stream. +// writeStatus sends stream status to the client and terminates the stream. // There is no further I/O operations being able to perform on this stream. // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. @@ -1083,7 +1088,7 @@ func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))}) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) - if p := st.Proto(); p != nil && len(p.Details) > 0 { + if p := istatus.RawStatusProto(st); len(p.GetDetails()) > 0 { // Do not use the user's grpc-status-details-bin (if present) if we are // even attempting to set our own. delete(s.trailer, grpcStatusDetailsBinHeader) @@ -1118,10 +1123,10 @@ func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error { // Send a RST_STREAM after the trailers if the client has not already half-closed. rst := s.getState() == streamActive t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) - for _, sh := range t.stats { + if t.stats != nil { // Note: The trailer fields are compressed with hpack after this call returns. // No WireLength field is set here. - sh.HandleRPC(s.Context(), &stats.OutTrailer{ + t.stats.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) } @@ -1131,17 +1136,13 @@ func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error { // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). func (t *http2Server) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error { - reader := data.Reader() - if !s.isHeaderSent() { // Headers haven't been written yet. if err := t.writeHeader(s, nil); err != nil { - _ = reader.Close() return err } } else { // Writing headers checks for this condition. if s.getState() == streamDone { - _ = reader.Close() return t.streamContextErr(s) } } @@ -1149,15 +1150,16 @@ func (t *http2Server) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ df := &dataFrame{ streamID: s.id, h: hdr, - reader: reader, + data: data, onEachWrite: t.setResetPingStrikes, } - if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { - _ = reader.Close() + dataLen := data.Len() + if err := s.wq.get(int32(len(hdr) + dataLen)); err != nil { return t.streamContextErr(s) } + data.Ref() if err := t.controlBuf.put(df); err != nil { - _ = reader.Close() + data.Free() return err } t.incrMsgSent() @@ -1292,7 +1294,8 @@ func (t *http2Server) Close(err error) { // deleteStream deletes the stream s from transport's active streams. 
func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) { t.mu.Lock() - if _, ok := t.activeStreams[s.id]; ok { + _, isActive := t.activeStreams[s.id] + if isActive { delete(t.activeStreams, s.id) if len(t.activeStreams) == 0 { t.idle = time.Now() @@ -1300,7 +1303,7 @@ func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) { } t.mu.Unlock() - if channelz.IsOn() { + if isActive && channelz.IsOn() { if eosReceived { t.channelz.SocketMetrics.StreamsSucceeded.Add(1) } else { @@ -1340,10 +1343,10 @@ func (t *http2Server) closeStream(s *ServerStream, rst bool, rstCode http2.ErrCo // called to interrupt the potential blocking on other goroutines. s.cancel() - oldState := s.swapState(streamDone) - if oldState == streamDone { - return - } + // We can't return early even if the stream's state is "done" as the state + // might have been set by the `finishStream` method. Deleting the stream via + // `finishStream` can get blocked on flow control. + s.swapState(streamDone) t.deleteStream(s, eosReceived) t.controlBuf.put(&cleanupStream{ diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index f997f9fdb..6209eb23c 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -25,7 +25,6 @@ import ( "fmt" "io" "math" - "net" "net/http" "net/url" "strconv" @@ -37,6 +36,7 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" + "google.golang.org/grpc/mem" ) const ( @@ -196,11 +196,11 @@ func decodeTimeout(s string) (time.Duration, error) { if !ok { return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s) } - t, err := strconv.ParseInt(s[:size-1], 10, 64) + t, err := strconv.ParseUint(s[:size-1], 10, 64) if err != nil { return 0, err } - const maxHours = math.MaxInt64 / int64(time.Hour) + const maxHours = math.MaxInt64 / uint64(time.Hour) if d == time.Hour && t > maxHours { // This timeout would overflow math.MaxInt64; clamp it. return time.Duration(math.MaxInt64), nil @@ -300,11 +300,11 @@ type bufWriter struct { buf []byte offset int batchSize int - conn net.Conn + conn io.Writer err error } -func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { +func newBufWriter(conn io.Writer, batchSize int, pool *sync.Pool) *bufWriter { w := &bufWriter{ batchSize: batchSize, conn: conn, @@ -388,15 +388,35 @@ func toIOError(err error) error { return ioError{error: err} } +type parsedDataFrame struct { + http2.FrameHeader + data mem.Buffer +} + +func (df *parsedDataFrame) StreamEnded() bool { + return df.FrameHeader.Flags.Has(http2.FlagDataEndStream) +} + type framer struct { - writer *bufWriter - fr *http2.Framer + writer *bufWriter + fr *http2.Framer + headerBuf []byte // cached slice for framer headers to reduce heap allocs. + reader io.Reader + dataFrame parsedDataFrame // Cached data frame to avoid heap allocations. + pool mem.BufferPool + errDetail error } var writeBufferPoolMap = make(map[int]*sync.Pool) var writeBufferMutex sync.Mutex -func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { +func newFramer(conn io.ReadWriter, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32, memPool mem.BufferPool) *framer { + if memPool == nil { + // Note that this is only supposed to be nil in tests. 
Otherwise, stream + // is always initialized with a BufferPool. + memPool = mem.DefaultBufferPool() + } + if writeBufferSize < 0 { writeBufferSize = 0 } @@ -412,6 +432,8 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBu f := &framer{ writer: w, fr: http2.NewFramer(w, r), + reader: r, + pool: memPool, } f.fr.SetMaxReadFrameSize(http2MaxFrameLen) // Opt-in to Frame reuse API on framer to reduce garbage. @@ -422,6 +444,146 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBu return f } +// writeData writes a DATA frame. +// +// It is the caller's responsibility not to violate the maximum frame size. +func (f *framer) writeData(streamID uint32, endStream bool, data [][]byte) error { + var flags http2.Flags + if endStream { + flags = http2.FlagDataEndStream + } + length := uint32(0) + for _, d := range data { + length += uint32(len(d)) + } + // TODO: Replace the header write with the framer API being added in + // https://github.com/golang/go/issues/66655. + f.headerBuf = append(f.headerBuf[:0], + byte(length>>16), + byte(length>>8), + byte(length), + byte(http2.FrameData), + byte(flags), + byte(streamID>>24), + byte(streamID>>16), + byte(streamID>>8), + byte(streamID)) + if _, err := f.writer.Write(f.headerBuf); err != nil { + return err + } + for _, d := range data { + if _, err := f.writer.Write(d); err != nil { + return err + } + } + return nil +} + +// readFrame reads a single frame. The returned Frame is only valid +// until the next call to readFrame. +func (f *framer) readFrame() (any, error) { + f.errDetail = nil + fh, err := f.fr.ReadFrameHeader() + if err != nil { + f.errDetail = f.fr.ErrorDetail() + return nil, err + } + // Read the data frame directly from the underlying io.Reader to avoid + // copies. + if fh.Type == http2.FrameData { + err = f.readDataFrame(fh) + return &f.dataFrame, err + } + fr, err := f.fr.ReadFrameForHeader(fh) + if err != nil { + f.errDetail = f.fr.ErrorDetail() + return nil, err + } + return fr, err +} + +// errorDetail returns a more detailed error of the last error +// returned by framer.readFrame. For instance, if readFrame +// returns a StreamError with code PROTOCOL_ERROR, errorDetail +// will say exactly what was invalid. errorDetail is not guaranteed +// to return a non-nil value. +// errorDetail is reset after the next call to readFrame. +func (f *framer) errorDetail() error { + return f.errDetail +} + +func (f *framer) readDataFrame(fh http2.FrameHeader) (err error) { + if fh.StreamID == 0 { + // DATA frames MUST be associated with a stream. If a + // DATA frame is received whose stream identifier + // field is 0x0, the recipient MUST respond with a + // connection error (Section 5.4.1) of type + // PROTOCOL_ERROR. + f.errDetail = errors.New("DATA frame with stream ID 0") + return http2.ConnectionError(http2.ErrCodeProtocol) + } + // Converting a *[]byte to a mem.SliceBuffer incurs a heap allocation. This + // conversion is performed by mem.NewBuffer. To avoid the extra allocation + // a []byte is allocated directly if required and cast to a mem.SliceBuffer. + var buf []byte + // poolHandle is the pointer returned by the buffer pool (if it's used.). 
+ var poolHandle *[]byte + useBufferPool := !mem.IsBelowBufferPoolingThreshold(int(fh.Length)) + if useBufferPool { + poolHandle = f.pool.Get(int(fh.Length)) + buf = *poolHandle + defer func() { + if err != nil { + f.pool.Put(poolHandle) + } + }() + } else { + buf = make([]byte, int(fh.Length)) + } + if fh.Flags.Has(http2.FlagDataPadded) { + if fh.Length == 0 { + return io.ErrUnexpectedEOF + } + // This initial 1-byte read can be inefficient for unbuffered readers, + // but it allows the rest of the payload to be read directly to the + // start of the destination slice. This makes it easy to return the + // original slice back to the buffer pool. + if _, err := io.ReadFull(f.reader, buf[:1]); err != nil { + return err + } + padSize := buf[0] + buf = buf[:len(buf)-1] + if int(padSize) > len(buf) { + // If the length of the padding is greater than the + // length of the frame payload, the recipient MUST + // treat this as a connection error. + // Filed: https://github.com/http2/http2-spec/issues/610 + f.errDetail = errors.New("pad size larger than data payload") + return http2.ConnectionError(http2.ErrCodeProtocol) + } + if _, err := io.ReadFull(f.reader, buf); err != nil { + return err + } + buf = buf[:len(buf)-int(padSize)] + } else if _, err := io.ReadFull(f.reader, buf); err != nil { + return err + } + + f.dataFrame.FrameHeader = fh + if useBufferPool { + // Update the handle to point to the (potentially re-sliced) buf. + *poolHandle = buf + f.dataFrame.data = mem.NewBuffer(poolHandle, f.pool) + } else { + f.dataFrame.data = mem.SliceBuffer(buf) + } + return nil +} + +func (df *parsedDataFrame) Header() http2.FrameHeader { + return df.FrameHeader +} + func getWriteBufferPool(size int) *sync.Pool { writeBufferMutex.Lock() defer writeBufferMutex.Unlock() diff --git a/vendor/google.golang.org/grpc/internal/transport/server_stream.go b/vendor/google.golang.org/grpc/internal/transport/server_stream.go index cf8da0b52..ed6a13b75 100644 --- a/vendor/google.golang.org/grpc/internal/transport/server_stream.go +++ b/vendor/google.golang.org/grpc/internal/transport/server_stream.go @@ -32,7 +32,7 @@ import ( // ServerStream implements streaming functionality for a gRPC server. type ServerStream struct { - *Stream // Embed for common stream functionality. + Stream // Embed for common stream functionality. st internalServerTransport ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance) @@ -43,12 +43,13 @@ type ServerStream struct { // Holds compressor names passed in grpc-accept-encoding metadata from the // client. clientAdvertisedCompressors string - headerWireLength int // hdrMu protects outgoing header and trailer metadata. hdrMu sync.Mutex header metadata.MD // the outgoing header metadata. Updated by WriteHeader. headerSent atomic.Bool // atomically set when the headers are sent out. + + headerWireLength int } // Read reads an n byte message from the input stream. 
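Editorial note, not part of the vendored diff: the readDataFrame helper added to http_util.go above reads DATA frames straight off the wire into a pooled slice rather than letting http2.Framer allocate. The subtle part is padding: it reads the 1-byte pad length first so the payload lands at the start of the slice (keeping the original pool pointer usable for Put), then trims the padding off the tail. Below is a standalone sketch of just that read path, with the buffer pool and error-detail bookkeeping stripped out; readPadded is a hypothetical name.

package main

import (
	"bytes"
	"fmt"
	"io"
)

// readPadded mirrors readDataFrame's handling of a padded DATA frame body
// (RFC 7540 §6.1): one pad-length octet, then data, then padding. Reading
// the pad octet separately lets the payload be read to the front of buf.
func readPadded(r io.Reader, length int) ([]byte, error) {
	if length == 0 {
		return nil, io.ErrUnexpectedEOF // padded frame needs the pad octet
	}
	buf := make([]byte, length)
	if _, err := io.ReadFull(r, buf[:1]); err != nil { // pad-length octet
		return nil, err
	}
	padSize := int(buf[0])
	buf = buf[:len(buf)-1] // the pad-length octet is not payload
	if padSize > len(buf) {
		return nil, fmt.Errorf("pad size %d larger than data payload %d", padSize, len(buf))
	}
	if _, err := io.ReadFull(r, buf); err != nil { // data + padding
		return nil, err
	}
	return buf[:len(buf)-padSize], nil // drop trailing padding
}

func main() {
	// Frame body for payload "hi" with 2 bytes of padding: [2 'h' 'i' 0 0].
	body := []byte{2, 'h', 'i', 0, 0}
	data, err := readPadded(bytes.NewReader(body), len(body))
	fmt.Printf("%q %v\n", data, err) // "hi" <nil>
}

The tail-only trim mirrors buf = buf[:len(buf)-int(padSize)] in the vendored code: because only the end is cut, the result is still a prefix of the pooled allocation, which is exactly the re-use condition the updated BufferPool.Put documentation later in this diff spells out.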
@@ -178,3 +179,11 @@ func (s *ServerStream) SetTrailer(md metadata.MD) error { s.hdrMu.Unlock() return nil } + +func (s *ServerStream) requestRead(n int) { + s.st.adjustWindow(s, uint32(n)) +} + +func (s *ServerStream) updateWindow(n int) { + s.st.updateWindow(s, uint32(n)) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index af4a4aeab..5ff83a7d7 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -68,11 +68,11 @@ type recvBuffer struct { err error } -func newRecvBuffer() *recvBuffer { - b := &recvBuffer{ - c: make(chan recvMsg, 1), - } - return b +// init allows a recvBuffer to be initialized in-place, which is useful +// for resetting a buffer or for avoiding a heap allocation when the buffer +// is embedded in another struct. +func (b *recvBuffer) init() { + b.c = make(chan recvMsg, 1) } func (b *recvBuffer) put(r recvMsg) { @@ -123,12 +123,13 @@ func (b *recvBuffer) get() <-chan recvMsg { // recvBufferReader implements io.Reader interface to read the data from // recvBuffer. type recvBufferReader struct { - closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata. - ctx context.Context - ctxDone <-chan struct{} // cache of ctx.Done() (for performance). - recv *recvBuffer - last mem.Buffer // Stores the remaining data in the previous calls. - err error + _ noCopy + clientStream *ClientStream // The client transport stream is closed with a status representing ctx.Err() and nil trailer metadata. + ctx context.Context + ctxDone <-chan struct{} // cache of ctx.Done() (for performance). + recv *recvBuffer + last mem.Buffer // Stores the remaining data in the previous calls. + err error } func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) { @@ -139,7 +140,7 @@ func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) { n, r.last = mem.ReadUnsafe(header, r.last) return n, nil } - if r.closeStream != nil { + if r.clientStream != nil { n, r.err = r.readMessageHeaderClient(header) } else { n, r.err = r.readMessageHeader(header) @@ -164,7 +165,7 @@ func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) { } return buf, nil } - if r.closeStream != nil { + if r.clientStream != nil { buf, r.err = r.readClient(n) } else { buf, r.err = r.read(n) @@ -209,7 +210,7 @@ func (r *recvBufferReader) readMessageHeaderClient(header []byte) (n int, err er // TODO: delaying ctx error seems like a unnecessary side effect. What // we really want is to mark the stream as done, and return ctx error // faster. - r.closeStream(ContextErr(r.ctx.Err())) + r.clientStream.Close(ContextErr(r.ctx.Err())) m := <-r.recv.get() return r.readMessageHeaderAdditional(m, header) case m := <-r.recv.get(): @@ -236,7 +237,7 @@ func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) { // TODO: delaying ctx error seems like a unnecessary side effect. What // we really want is to mark the stream as done, and return ctx error // faster. - r.closeStream(ContextErr(r.ctx.Err())) + r.clientStream.Close(ContextErr(r.ctx.Err())) m := <-r.recv.get() return r.readAdditional(m, n) case m := <-r.recv.get(): @@ -285,27 +286,32 @@ const ( // Stream represents an RPC in the transport layer. 
type Stream struct { - id uint32 ctx context.Context // the associated context of the stream method string // the associated RPC method of the stream recvCompress string sendCompress string - buf *recvBuffer - trReader *transportReader - fc *inFlow - wq *writeQuota - - // Callback to state application's intentions to read data. This - // is used to adjust flow control, if needed. - requestRead func(int) - state streamState + readRequester readRequester // contentSubtype is the content-subtype for requests. // this must be lowercase or the behavior is undefined. contentSubtype string trailer metadata.MD // the key-value map of trailer metadata. + + // Non-pointer fields are at the end to optimize GC performance. + state streamState + id uint32 + buf recvBuffer + trReader transportReader + fc inFlow + wq writeQuota +} + +// readRequester is used to state application's intentions to read data. This +// is used to adjust flow control, if needed. +type readRequester interface { + requestRead(int) } func (s *Stream) swapState(st streamState) streamState { @@ -355,7 +361,7 @@ func (s *Stream) ReadMessageHeader(header []byte) (err error) { if er := s.trReader.er; er != nil { return er } - s.requestRead(len(header)) + s.readRequester.requestRead(len(header)) for len(header) != 0 { n, err := s.trReader.ReadMessageHeader(header) header = header[n:] @@ -378,7 +384,7 @@ func (s *Stream) read(n int) (data mem.BufferSlice, err error) { if er := s.trReader.er; er != nil { return nil, er } - s.requestRead(n) + s.readRequester.requestRead(n) for n != 0 { buf, err := s.trReader.Read(n) var bufLen int @@ -401,16 +407,34 @@ func (s *Stream) read(n int) (data mem.BufferSlice, err error) { return data, nil } +// noCopy may be embedded into structs which must not be copied +// after the first use. +// +// See https://golang.org/issues/8005#issuecomment-190753527 +// for details. +type noCopy struct { +} + +func (*noCopy) Lock() {} +func (*noCopy) Unlock() {} + // transportReader reads all the data available for this Stream from the transport and // passes them into the decoder, which converts them into a gRPC message stream. // The error is io.EOF when the stream is done or another non-nil error if // the stream broke. type transportReader struct { - reader *recvBufferReader + _ noCopy // The handler to control the window update procedure for both this // particular stream and the associated transport. - windowHandler func(int) + windowHandler windowHandler er error + reader recvBufferReader +} + +// The handler to control the window update procedure for both this +// particular stream and the associated transport. 
+type windowHandler interface { + updateWindow(int) } func (t *transportReader) ReadMessageHeader(header []byte) (int, error) { @@ -419,7 +443,7 @@ func (t *transportReader) ReadMessageHeader(header []byte) (int, error) { t.er = err return 0, err } - t.windowHandler(n) + t.windowHandler.updateWindow(n) return n, nil } @@ -429,7 +453,7 @@ func (t *transportReader) Read(n int) (mem.Buffer, error) { t.er = err return buf, err } - t.windowHandler(buf.Len()) + t.windowHandler.updateWindow(buf.Len()) return buf, nil } @@ -454,7 +478,7 @@ type ServerConfig struct { ConnectionTimeout time.Duration Credentials credentials.TransportCredentials InTapHandle tap.ServerInHandle - StatsHandlers []stats.Handler + StatsHandler stats.Handler KeepaliveParams keepalive.ServerParameters KeepalivePolicy keepalive.EnforcementPolicy InitialWindowSize int32 @@ -466,6 +490,7 @@ type ServerConfig struct { MaxHeaderListSize *uint32 HeaderTableSize *uint32 BufferPool mem.BufferPool + StaticWindowSize bool } // ConnectOptions covers all relevant options for communicating with the server. @@ -504,6 +529,8 @@ type ConnectOptions struct { MaxHeaderListSize *uint32 // The mem.BufferPool to use when reading/writing to the wire. BufferPool mem.BufferPool + // StaticWindowSize controls whether dynamic window sizing is enabled. + StaticWindowSize bool } // WriteOptions provides additional hints and information for message @@ -540,6 +567,11 @@ type CallHdr struct { PreviousAttempts int // value of grpc-previous-rpc-attempts header to set DoneFunc func() // called when the stream is finished + + // Authority is used to explicitly override the `:authority` header. If set, + // this value takes precedence over the Host field and will be used as the + // value for the `:authority` header. + Authority string } // ClientTransport is the common interface for all gRPC client-side transport @@ -607,6 +639,8 @@ type internalServerTransport interface { write(s *ServerStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error writeStatus(s *ServerStream, st *status.Status) error incrMsgRecv() + adjustWindow(s *ServerStream, n uint32) + updateWindow(s *ServerStream, n uint32) } // connectionErrorf creates an ConnectionError with the specified error description. diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go index c37c58c02..f211e7274 100644 --- a/vendor/google.golang.org/grpc/mem/buffer_pool.go +++ b/vendor/google.golang.org/grpc/mem/buffer_pool.go @@ -32,6 +32,9 @@ type BufferPool interface { Get(length int) *[]byte // Put returns a buffer to the pool. + // + // The provided pointer must hold a prefix of the buffer obtained via + // BufferPool.Get to ensure the buffer's entire capacity can be re-used. 
Put(*[]byte) } @@ -118,7 +121,11 @@ type sizedBufferPool struct { } func (p *sizedBufferPool) Get(size int) *[]byte { - buf := p.pool.Get().(*[]byte) + buf, ok := p.pool.Get().(*[]byte) + if !ok { + buf := make([]byte, size, p.defaultSize) + return &buf + } b := *buf clear(b[:cap(b)]) *buf = b[:size] @@ -137,12 +144,6 @@ func (p *sizedBufferPool) Put(buf *[]byte) { func newSizedBufferPool(size int) *sizedBufferPool { return &sizedBufferPool{ - pool: sync.Pool{ - New: func() any { - buf := make([]byte, size) - return &buf - }, - }, defaultSize: size, } } @@ -160,6 +161,7 @@ type simpleBufferPool struct { func (p *simpleBufferPool) Get(size int) *[]byte { bs, ok := p.pool.Get().(*[]byte) if ok && cap(*bs) >= size { + clear((*bs)[:cap(*bs)]) *bs = (*bs)[:size] return bs } diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go index 65002e2cc..084fb19c6 100644 --- a/vendor/google.golang.org/grpc/mem/buffer_slice.go +++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go @@ -19,6 +19,7 @@ package mem import ( + "fmt" "io" ) @@ -117,47 +118,53 @@ func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer { // Reader returns a new Reader for the input slice after taking references to // each underlying buffer. -func (s BufferSlice) Reader() Reader { +func (s BufferSlice) Reader() *Reader { s.Ref() - return &sliceReader{ + return &Reader{ data: s, len: s.Len(), } } // Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface -// with other parts systems. It also provides an additional convenience method -// Remaining(), which returns the number of unread bytes remaining in the slice. +// with other systems. +// // Buffers will be freed as they are read. -type Reader interface { - io.Reader - io.ByteReader - // Close frees the underlying BufferSlice and never returns an error. Subsequent - // calls to Read will return (0, io.EOF). - Close() error - // Remaining returns the number of unread bytes remaining in the slice. - Remaining() int -} - -type sliceReader struct { +// +// A Reader can be constructed from a BufferSlice; alternatively the zero value +// of a Reader may be used after calling Reset on it. +type Reader struct { data BufferSlice len int // The index into data[0].ReadOnlyData(). bufferIdx int } -func (r *sliceReader) Remaining() int { +// Remaining returns the number of unread bytes remaining in the slice. +func (r *Reader) Remaining() int { return r.len } -func (r *sliceReader) Close() error { +// Reset frees the currently held buffer slice and starts reading from the +// provided slice. This allows reusing the reader object. +func (r *Reader) Reset(s BufferSlice) { + r.data.Free() + s.Ref() + r.data = s + r.len = s.Len() + r.bufferIdx = 0 +} + +// Close frees the underlying BufferSlice and never returns an error. Subsequent +// calls to Read will return (0, io.EOF). 
+func (r *Reader) Close() error { r.data.Free() r.data = nil r.len = 0 return nil } -func (r *sliceReader) freeFirstBufferIfEmpty() bool { +func (r *Reader) freeFirstBufferIfEmpty() bool { if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) { return false } @@ -168,7 +175,7 @@ func (r *sliceReader) freeFirstBufferIfEmpty() bool { return true } -func (r *sliceReader) Read(buf []byte) (n int, _ error) { +func (r *Reader) Read(buf []byte) (n int, _ error) { if r.len == 0 { return 0, io.EOF } @@ -191,7 +198,8 @@ func (r *sliceReader) Read(buf []byte) (n int, _ error) { return n, nil } -func (r *sliceReader) ReadByte() (byte, error) { +// ReadByte reads a single byte. +func (r *Reader) ReadByte() (byte, error) { if r.len == 0 { return 0, io.EOF } @@ -279,3 +287,59 @@ nextBuffer: } } } + +// Discard skips the next n bytes, returning the number of bytes discarded. +// +// It frees buffers as they are fully consumed. +// +// If Discard skips fewer than n bytes, it also returns an error. +func (r *Reader) Discard(n int) (discarded int, err error) { + total := n + for n > 0 && r.len > 0 { + curData := r.data[0].ReadOnlyData() + curSize := min(n, len(curData)-r.bufferIdx) + n -= curSize + r.len -= curSize + r.bufferIdx += curSize + if r.bufferIdx >= len(curData) { + r.data[0].Free() + r.data = r.data[1:] + r.bufferIdx = 0 + } + } + discarded = total - n + if n > 0 { + return discarded, fmt.Errorf("insufficient bytes in reader") + } + return discarded, nil +} + +// Peek returns the next n bytes without advancing the reader. +// +// Peek appends results to the provided res slice and returns the updated slice. +// This pattern allows re-using the storage of res if it has sufficient +// capacity. +// +// The returned subslices are views into the underlying buffers and are only +// valid until the reader is advanced past the corresponding buffer. +// +// If Peek returns fewer than n bytes, it also returns an error. +func (r *Reader) Peek(n int, res [][]byte) ([][]byte, error) { + for i := 0; n > 0 && i < len(r.data); i++ { + curData := r.data[i].ReadOnlyData() + start := 0 + if i == 0 { + start = r.bufferIdx + } + curSize := min(n, len(curData)-start) + if curSize == 0 { + continue + } + res = append(res, curData[start:start+curSize]) + n -= curSize + } + if n > 0 { + return nil, fmt.Errorf("insufficient bytes in reader") + } + return res, nil +} diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index a2d2a798d..aa52bfe95 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -29,7 +29,6 @@ import ( "google.golang.org/grpc/internal/channelz" istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" - "google.golang.org/grpc/stats" "google.golang.org/grpc/status" ) @@ -48,14 +47,11 @@ type pickerGeneration struct { // actions and unblock when there's a picker update. type pickerWrapper struct { // If pickerGen holds a nil pointer, the pickerWrapper is closed. 
- pickerGen atomic.Pointer[pickerGeneration] - statsHandlers []stats.Handler // to record blocking picker calls + pickerGen atomic.Pointer[pickerGeneration] } -func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { - pw := &pickerWrapper{ - statsHandlers: statsHandlers, - } +func newPickerWrapper() *pickerWrapper { + pw := &pickerWrapper{} pw.pickerGen.Store(&pickerGeneration{ blockingCh: make(chan struct{}), }) @@ -93,6 +89,12 @@ func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) { } } +type pick struct { + transport transport.ClientTransport // the selected transport + result balancer.PickResult // the contents of the pick from the LB policy + blocked bool // set if a picker call queued for a new picker +} + // pick returns the transport that will be used for the RPC. // It may block in the following cases: // - there's no picker @@ -100,15 +102,16 @@ func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) { // - the current picker returns other errors and failfast is false. // - the subConn returned by the current picker is not READY // When one of these situations happens, pick blocks until the picker gets updated. -func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) { +func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (pick, error) { var ch chan struct{} var lastPickErr error + pickBlocked := false for { pg := pw.pickerGen.Load() if pg == nil { - return nil, balancer.PickResult{}, ErrClientConnClosing + return pick{}, ErrClientConnClosing } if pg.picker == nil { ch = pg.blockingCh @@ -127,9 +130,9 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } switch ctx.Err() { case context.DeadlineExceeded: - return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr) + return pick{}, status.Error(codes.DeadlineExceeded, errStr) case context.Canceled: - return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr) + return pick{}, status.Error(codes.Canceled, errStr) } case <-ch: } @@ -145,9 +148,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. // In the second case, the only way it will get to this conditional is // if there is a new picker. if ch != nil { - for _, sh := range pw.statsHandlers { - sh.HandleRPC(ctx, &stats.PickerUpdated{}) - } + pickBlocked = true } ch = pg.blockingCh @@ -164,7 +165,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. if istatus.IsRestrictedControlPlaneCode(st) { err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err) } - return nil, balancer.PickResult{}, dropError{error: err} + return pick{}, dropError{error: err} } // For all other errors, wait for ready RPCs should block and other // RPCs should fail with unavailable. @@ -172,7 +173,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. lastPickErr = err continue } - return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error()) + return pick{}, status.Error(codes.Unavailable, err.Error()) } acbw, ok := pickResult.SubConn.(*acBalancerWrapper) @@ -183,9 +184,8 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. 
if t := acbw.ac.getReadyTransport(); t != nil { if channelz.IsOn() { doneChannelzWrapper(acbw, &pickResult) - return t, pickResult, nil } - return t, pickResult, nil + return pick{transport: t, result: pickResult, blocked: pickBlocked}, nil } if pickResult.Done != nil { // Calling done with nil error, no bytes sent and no bytes received. diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index ee0ff969a..1e783febf 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -47,9 +47,6 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { } // check if the context has the relevant information to prepareMsg - if rpcInfo.preloaderInfo == nil { - return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil") - } if rpcInfo.preloaderInfo.codec == nil { return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil") } diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go index e0e9ca45e..92fdc3afa 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go @@ -21,7 +21,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.5 +// protoc-gen-go v1.36.10 // protoc v5.27.1 // source: grpc/reflection/v1/reflection.proto @@ -659,117 +659,45 @@ func (x *ErrorResponse) GetErrorMessage() string { var File_grpc_reflection_v1_reflection_proto protoreflect.FileDescriptor -var file_grpc_reflection_v1_reflection_proto_rawDesc = string([]byte{ - 0x0a, 0x23, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x22, 0xf3, 0x02, 0x0a, 0x17, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x66, 0x69, 0x6c, - 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x42, 0x79, 0x46, 0x69, 0x6c, - 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x62, 0x0a, - 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, - 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 
0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x42, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, - 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, - 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, - 0x66, 0x0a, 0x10, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, - 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, - 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0xae, 0x04, 0x0a, 0x18, 0x53, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x6f, - 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, - 0x6f, 0x73, 0x74, 0x12, 0x56, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, - 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x66, 0x0a, 0x18, 0x66, - 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x72, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x66, 0x69, 0x6c, - 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x72, 0x0a, 0x1e, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x72, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, - 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x1b, 
0x61, 0x6c, 0x6c, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5f, 0x0a, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x5f, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, - 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, - 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x48, 0x00, 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0e, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x12, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, - 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4c, 0x0a, 0x16, 0x46, 0x69, 0x6c, 0x65, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0c, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x54, - 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, - 0x65, 0x72, 0x22, 0x54, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x07, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, - 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, - 0x53, 0x0a, 0x0d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, - 
0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x32, 0x89, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, - 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x75, 0x0a, 0x14, 0x53, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, - 0x6f, 0x12, 0x2b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, - 0x42, 0x66, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x15, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x50, 0x01, 0x5a, 0x34, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, - 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) +const file_grpc_reflection_v1_reflection_proto_rawDesc = "" + + "\n" + + "#grpc/reflection/v1/reflection.proto\x12\x12grpc.reflection.v1\"\xf3\x02\n" + + "\x17ServerReflectionRequest\x12\x12\n" + + "\x04host\x18\x01 \x01(\tR\x04host\x12*\n" + + "\x10file_by_filename\x18\x03 \x01(\tH\x00R\x0efileByFilename\x126\n" + + "\x16file_containing_symbol\x18\x04 \x01(\tH\x00R\x14fileContainingSymbol\x12b\n" + + "\x19file_containing_extension\x18\x05 \x01(\v2$.grpc.reflection.v1.ExtensionRequestH\x00R\x17fileContainingExtension\x12B\n" + + "\x1dall_extension_numbers_of_type\x18\x06 \x01(\tH\x00R\x19allExtensionNumbersOfType\x12%\n" + + "\rlist_services\x18\a \x01(\tH\x00R\flistServicesB\x11\n" + + "\x0fmessage_request\"f\n" + + "\x10ExtensionRequest\x12'\n" + + "\x0fcontaining_type\x18\x01 \x01(\tR\x0econtainingType\x12)\n" + + "\x10extension_number\x18\x02 \x01(\x05R\x0fextensionNumber\"\xae\x04\n" + + "\x18ServerReflectionResponse\x12\x1d\n" + + "\n" + + "valid_host\x18\x01 \x01(\tR\tvalidHost\x12V\n" + + "\x10original_request\x18\x02 \x01(\v2+.grpc.reflection.v1.ServerReflectionRequestR\x0foriginalRequest\x12f\n" + + "\x18file_descriptor_response\x18\x04 \x01(\v2*.grpc.reflection.v1.FileDescriptorResponseH\x00R\x16fileDescriptorResponse\x12r\n" + + "\x1eall_extension_numbers_response\x18\x05 \x01(\v2+.grpc.reflection.v1.ExtensionNumberResponseH\x00R\x1ballExtensionNumbersResponse\x12_\n" + + "\x16list_services_response\x18\x06 \x01(\v2'.grpc.reflection.v1.ListServiceResponseH\x00R\x14listServicesResponse\x12J\n" + + "\x0eerror_response\x18\a \x01(\v2!.grpc.reflection.v1.ErrorResponseH\x00R\rerrorResponseB\x12\n" + + "\x10message_response\"L\n" + + "\x16FileDescriptorResponse\x122\n" + + "\x15file_descriptor_proto\x18\x01 \x03(\fR\x13fileDescriptorProto\"j\n" + + 
"\x17ExtensionNumberResponse\x12$\n" + + "\x0ebase_type_name\x18\x01 \x01(\tR\fbaseTypeName\x12)\n" + + "\x10extension_number\x18\x02 \x03(\x05R\x0fextensionNumber\"T\n" + + "\x13ListServiceResponse\x12=\n" + + "\aservice\x18\x01 \x03(\v2#.grpc.reflection.v1.ServiceResponseR\aservice\"%\n" + + "\x0fServiceResponse\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\"S\n" + + "\rErrorResponse\x12\x1d\n" + + "\n" + + "error_code\x18\x01 \x01(\x05R\terrorCode\x12#\n" + + "\rerror_message\x18\x02 \x01(\tR\ferrorMessage2\x89\x01\n" + + "\x10ServerReflection\x12u\n" + + "\x14ServerReflectionInfo\x12+.grpc.reflection.v1.ServerReflectionRequest\x1a,.grpc.reflection.v1.ServerReflectionResponse(\x010\x01Bf\n" + + "\x15io.grpc.reflection.v1B\x15ServerReflectionProtoP\x01Z4google.golang.org/grpc/reflection/grpc_reflection_v1b\x06proto3" var ( file_grpc_reflection_v1_reflection_proto_rawDescOnce sync.Once diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go index 031082807..f4a361c64 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go @@ -90,7 +90,7 @@ type ServerReflectionServer interface { type UnimplementedServerReflectionServer struct{} func (UnimplementedServerReflectionServer) ServerReflectionInfo(grpc.BidiStreamingServer[ServerReflectionRequest, ServerReflectionResponse]) error { - return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") + return status.Error(codes.Unimplemented, "method ServerReflectionInfo not implemented") } func (UnimplementedServerReflectionServer) testEmbeddedByValue() {} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go index fd31ebfbf..c803cf3ba 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.5 +// protoc-gen-go v1.36.10 // protoc v5.27.1 // grpc/reflection/v1alpha/reflection.proto is a deprecated file. 
@@ -729,122 +729,45 @@ func (x *ErrorResponse) GetErrorMessage() string { var File_grpc_reflection_v1alpha_reflection_proto protoreflect.FileDescriptor -var file_grpc_reflection_v1alpha_reflection_proto_rawDesc = string([]byte{ - 0x0a, 0x28, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x22, 0xf8, 0x02, 0x0a, 0x17, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, - 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, - 0x6f, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, - 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, - 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x42, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x36, 0x0a, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, - 0x6e, 0x67, 0x5f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, - 0x00, 0x52, 0x14, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, - 0x67, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x67, 0x0a, 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x42, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, 0x66, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x6c, - 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x66, - 0x0a, 0x10, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 
0x6e, - 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0xc7, 0x04, 0x0a, 0x18, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x6f, 0x73, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x6f, - 0x73, 0x74, 0x12, 0x5b, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, - 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x6b, 0x0a, 0x18, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x65, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x77, 0x0a, 0x1e, - 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, - 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x64, 0x0a, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, - 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4f, 0x0a, 0x0e, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x72, - 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x12, 0x0a, 0x10, - 0x6d, 0x65, 0x73, 
0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x4c, 0x0a, 0x16, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x66, 0x69, - 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, - 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x61, 0x73, - 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, - 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x59, 0x0a, 0x13, 0x4c, 0x69, - 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x42, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x53, 0x0a, 0x0d, - 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, - 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x32, 0x93, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7f, 0x0a, 0x14, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x30, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, - 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x73, 0x0a, 0x1a, 0x69, 0x6f, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 
0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x42, 0x15, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, - 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0xb8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -}) +const file_grpc_reflection_v1alpha_reflection_proto_rawDesc = "" + + "\n" + + "(grpc/reflection/v1alpha/reflection.proto\x12\x17grpc.reflection.v1alpha\"\xf8\x02\n" + + "\x17ServerReflectionRequest\x12\x12\n" + + "\x04host\x18\x01 \x01(\tR\x04host\x12*\n" + + "\x10file_by_filename\x18\x03 \x01(\tH\x00R\x0efileByFilename\x126\n" + + "\x16file_containing_symbol\x18\x04 \x01(\tH\x00R\x14fileContainingSymbol\x12g\n" + + "\x19file_containing_extension\x18\x05 \x01(\v2).grpc.reflection.v1alpha.ExtensionRequestH\x00R\x17fileContainingExtension\x12B\n" + + "\x1dall_extension_numbers_of_type\x18\x06 \x01(\tH\x00R\x19allExtensionNumbersOfType\x12%\n" + + "\rlist_services\x18\a \x01(\tH\x00R\flistServicesB\x11\n" + + "\x0fmessage_request\"f\n" + + "\x10ExtensionRequest\x12'\n" + + "\x0fcontaining_type\x18\x01 \x01(\tR\x0econtainingType\x12)\n" + + "\x10extension_number\x18\x02 \x01(\x05R\x0fextensionNumber\"\xc7\x04\n" + + "\x18ServerReflectionResponse\x12\x1d\n" + + "\n" + + "valid_host\x18\x01 \x01(\tR\tvalidHost\x12[\n" + + "\x10original_request\x18\x02 \x01(\v20.grpc.reflection.v1alpha.ServerReflectionRequestR\x0foriginalRequest\x12k\n" + + "\x18file_descriptor_response\x18\x04 \x01(\v2/.grpc.reflection.v1alpha.FileDescriptorResponseH\x00R\x16fileDescriptorResponse\x12w\n" + + "\x1eall_extension_numbers_response\x18\x05 \x01(\v20.grpc.reflection.v1alpha.ExtensionNumberResponseH\x00R\x1ballExtensionNumbersResponse\x12d\n" + + "\x16list_services_response\x18\x06 \x01(\v2,.grpc.reflection.v1alpha.ListServiceResponseH\x00R\x14listServicesResponse\x12O\n" + + "\x0eerror_response\x18\a \x01(\v2&.grpc.reflection.v1alpha.ErrorResponseH\x00R\rerrorResponseB\x12\n" + + "\x10message_response\"L\n" + + "\x16FileDescriptorResponse\x122\n" + + "\x15file_descriptor_proto\x18\x01 \x03(\fR\x13fileDescriptorProto\"j\n" + + "\x17ExtensionNumberResponse\x12$\n" + + "\x0ebase_type_name\x18\x01 \x01(\tR\fbaseTypeName\x12)\n" + + "\x10extension_number\x18\x02 \x03(\x05R\x0fextensionNumber\"Y\n" + + "\x13ListServiceResponse\x12B\n" + + "\aservice\x18\x01 \x03(\v2(.grpc.reflection.v1alpha.ServiceResponseR\aservice\"%\n" + + "\x0fServiceResponse\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\"S\n" + + "\rErrorResponse\x12\x1d\n" + + "\n" + + "error_code\x18\x01 \x01(\x05R\terrorCode\x12#\n" + + "\rerror_message\x18\x02 \x01(\tR\ferrorMessage2\x93\x01\n" + + "\x10ServerReflection\x12\x7f\n" + + "\x14ServerReflectionInfo\x120.grpc.reflection.v1alpha.ServerReflectionRequest\x1a1.grpc.reflection.v1alpha.ServerReflectionResponse(\x010\x01Bs\n" + + "\x1aio.grpc.reflection.v1alphaB\x15ServerReflectionProtoP\x01Z9google.golang.org/grpc/reflection/grpc_reflection_v1alpha\xb8\x01\x01b\x06proto3" var ( file_grpc_reflection_v1alpha_reflection_proto_rawDescOnce sync.Once diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go 
b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go index 80755d74d..0a43b521c 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go @@ -87,7 +87,7 @@ type ServerReflectionServer interface { type UnimplementedServerReflectionServer struct{} func (UnimplementedServerReflectionServer) ServerReflectionInfo(grpc.BidiStreamingServer[ServerReflectionRequest, ServerReflectionResponse]) error { - return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") + return status.Error(codes.Unimplemented, "method ServerReflectionInfo not implemented") } func (UnimplementedServerReflectionServer) testEmbeddedByValue() {} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index b84ef26d4..8e6af9514 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -332,6 +332,11 @@ type AuthorityOverrider interface { // OverrideAuthority returns the authority to use for a ClientConn with the // given target. The implementation must generate it without blocking, // typically in line, and must keep it unchanged. + // + // The returned string must be a valid ":authority" header value, i.e. be + // encoded according to + // [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2) as + // necessary. OverrideAuthority(Target) string } diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index ad20e9dff..6b04c9e87 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -160,6 +160,7 @@ type callInfo struct { codec baseCodec maxRetryRPCBufferSize int onFinish []func(err error) + authority string } func defaultCallInfo() *callInfo { @@ -365,6 +366,36 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { } func (o MaxRecvMsgSizeCallOption) after(*callInfo, *csAttempt) {} +// CallAuthority returns a CallOption that sets the HTTP/2 :authority header of +// an RPC to the specified value. When using CallAuthority, the credentials in +// use must implement the AuthorityValidator interface. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. +func CallAuthority(authority string) CallOption { + return AuthorityOverrideCallOption{Authority: authority} +} + +// AuthorityOverrideCallOption is a CallOption that indicates the HTTP/2 +// :authority header value to use for the call. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a later +// release. +type AuthorityOverrideCallOption struct { + Authority string +} + +func (o AuthorityOverrideCallOption) before(c *callInfo) error { + c.authority = o.Authority + return nil +} + +func (o AuthorityOverrideCallOption) after(*callInfo, *csAttempt) {} + // MaxCallSendMsgSize returns a CallOption which sets the maximum message size // in bytes the client can send. If this is not set, gRPC uses the default // `math.MaxInt32`. @@ -626,8 +657,20 @@ type streamReader interface { Read(n int) (mem.BufferSlice, error) } +// noCopy may be embedded into structs which must not be copied +// after the first use. +// +// See https://golang.org/issues/8005#issuecomment-190753527 +// for details. 
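The CallAuthority option introduced above lets a single RPC override the HTTP/2 :authority header; per its doc comment, the channel's transport credentials must implement the AuthorityValidator interface for the override to be accepted. A minimal client-side sketch follows; the echo stub, creds variable, and hostnames are illustrative stand-ins, not part of this diff (assumes imports of context, log, google.golang.org/grpc, and a generated pb package):

// Sketch only: pb.NewEchoClient, creds, and the target are assumed.
conn, err := grpc.NewClient("dns:///echo.internal:443",
	grpc.WithTransportCredentials(creds))
if err != nil {
	log.Fatalf("dial: %v", err)
}
defer conn.Close()
client := pb.NewEchoClient(conn)
// Overrides :authority for this call only; the API is marked experimental.
resp, err := client.Echo(ctx, &pb.EchoRequest{Message: "hi"},
	grpc.CallAuthority("echo.override.example.com"))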
+type noCopy struct { +} + +func (*noCopy) Lock() {} +func (*noCopy) Unlock() {} + // parser reads complete gRPC messages from the underlying reader. type parser struct { + _ noCopy // r is the underlying reader. // See the comment on recvMsg for the permissible // error types. @@ -918,7 +961,7 @@ func recv(p *parser, c baseCodec, s recvCompressor, dc Decompressor, m any, maxR // Information about RPC type rpcInfo struct { failfast bool - preloaderInfo *compressorInfo + preloaderInfo compressorInfo } // Information about Preloader @@ -937,7 +980,7 @@ type rpcInfoContextKey struct{} func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context { return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{ failfast: failfast, - preloaderInfo: &compressorInfo{ + preloaderInfo: compressorInfo{ codec: codec, cp: cp, comp: comp, diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 976e70ae0..ddd377341 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -124,7 +124,8 @@ type serviceInfo struct { // Server is a gRPC server to serve RPC requests. type Server struct { - opts serverOptions + opts serverOptions + statsHandler stats.Handler mu sync.Mutex // guards following lis map[net.Listener]bool @@ -179,6 +180,7 @@ type serverOptions struct { numServerWorkers uint32 bufferPool mem.BufferPool waitForHandlers bool + staticWindowSize bool } var defaultServerOptions = serverOptions{ @@ -279,6 +281,7 @@ func ReadBufferSize(s int) ServerOption { func InitialWindowSize(s int32) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.initialWindowSize = s + o.staticWindowSize = true }) } @@ -287,6 +290,29 @@ func InitialWindowSize(s int32) ServerOption { func InitialConnWindowSize(s int32) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.initialConnWindowSize = s + o.staticWindowSize = true + }) +} + +// StaticStreamWindowSize returns a ServerOption to set the initial stream +// window size to the value provided and disables dynamic flow control. +// The lower bound for window size is 64K and any value smaller than that +// will be ignored. +func StaticStreamWindowSize(s int32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.initialWindowSize = s + o.staticWindowSize = true + }) +} + +// StaticConnWindowSize returns a ServerOption to set the initial connection +// window size to the value provided and disables dynamic flow control. +// The lower bound for window size is 64K and any value smaller than that +// will be ignored. 
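StaticStreamWindowSize above and StaticConnWindowSize, whose body closes just below, pin the HTTP/2 stream and connection flow-control windows and set staticWindowSize, which disables dynamic (BDP-based) window estimation; note the plain InitialWindowSize/InitialConnWindowSize options now set the same flag. A hedged sketch of server construction with the new options (window values are illustrative; anything below 64K is ignored per the doc comments):

srv := grpc.NewServer(
	grpc.StaticStreamWindowSize(1<<20), // fixed 1 MiB per-stream window
	grpc.StaticConnWindowSize(4<<20),   // fixed 4 MiB per-connection window
)

Pinning the windows trades adaptive throughput tuning for predictable per-connection memory use.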
+func StaticConnWindowSize(s int32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.initialConnWindowSize = s + o.staticWindowSize = true }) } @@ -667,13 +693,14 @@ func NewServer(opt ...ServerOption) *Server { o.apply(&opts) } s := &Server{ - lis: make(map[net.Listener]bool), - opts: opts, - conns: make(map[string]map[transport.ServerTransport]bool), - services: make(map[string]*serviceInfo), - quit: grpcsync.NewEvent(), - done: grpcsync.NewEvent(), - channelz: channelz.RegisterServer(""), + lis: make(map[net.Listener]bool), + opts: opts, + statsHandler: istats.NewCombinedHandler(opts.statsHandlers...), + conns: make(map[string]map[transport.ServerTransport]bool), + services: make(map[string]*serviceInfo), + quit: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + channelz: channelz.RegisterServer(""), } chainUnaryServerInterceptors(s) chainStreamServerInterceptors(s) @@ -974,7 +1001,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { ConnectionTimeout: s.opts.connectionTimeout, Credentials: s.opts.creds, InTapHandle: s.opts.inTapHandle, - StatsHandlers: s.opts.statsHandlers, + StatsHandler: s.statsHandler, KeepaliveParams: s.opts.keepaliveParams, KeepalivePolicy: s.opts.keepalivePolicy, InitialWindowSize: s.opts.initialWindowSize, @@ -986,6 +1013,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, BufferPool: s.opts.bufferPool, + StaticWindowSize: s.opts.staticWindowSize, } st, err := transport.NewServerTransport(c, config) if err != nil { @@ -1010,18 +1038,18 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) { ctx = transport.SetConnection(ctx, rawConn) ctx = peer.NewContext(ctx, st.Peer()) - for _, sh := range s.opts.statsHandlers { - ctx = sh.TagConn(ctx, &stats.ConnTagInfo{ + if s.statsHandler != nil { + ctx = s.statsHandler.TagConn(ctx, &stats.ConnTagInfo{ RemoteAddr: st.Peer().Addr, LocalAddr: st.Peer().LocalAddr, }) - sh.HandleConn(ctx, &stats.ConnBegin{}) + s.statsHandler.HandleConn(ctx, &stats.ConnBegin{}) } defer func() { st.Close(errors.New("finished serving streams for the server transport")) - for _, sh := range s.opts.statsHandlers { - sh.HandleConn(ctx, &stats.ConnEnd{}) + if s.statsHandler != nil { + s.statsHandler.HandleConn(ctx, &stats.ConnEnd{}) } }() @@ -1078,7 +1106,7 @@ var _ http.Handler = (*Server)(nil) // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool) + st, err := transport.NewServerHandlerTransport(w, r, s.statsHandler, s.opts.bufferPool) if err != nil { // Errors returned from transport.NewServerHandlerTransport have // already been written to w. @@ -1172,12 +1200,8 @@ func (s *Server) sendResponse(ctx context.Context, stream *transport.ServerStrea return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", payloadLen, s.opts.maxSendMessageSize) } err = stream.Write(hdr, payload, opts) - if err == nil { - if len(s.opts.statsHandlers) != 0 { - for _, sh := range s.opts.statsHandlers { - sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now())) - } - } + if err == nil && s.statsHandler != nil { + s.statsHandler.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now())) } return err } @@ -1219,16 +1243,15 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info } func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { - shs := s.opts.statsHandlers - if len(shs) != 0 || trInfo != nil || channelz.IsOn() { + sh := s.statsHandler + if sh != nil || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { s.incrCallsStarted() } var statsBegin *stats.Begin - for _, sh := range shs { - beginTime := time.Now() + if sh != nil { statsBegin = &stats.Begin{ - BeginTime: beginTime, + BeginTime: time.Now(), IsClientStream: false, IsServerStream: false, } @@ -1256,7 +1279,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt trInfo.tr.Finish() } - for _, sh := range shs { + if sh != nil { end := &stats.End{ BeginTime: statsBegin.BeginTime, EndTime: time.Now(), @@ -1353,7 +1376,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt } var payInfo *payloadInfo - if len(shs) != 0 || len(binlogs) != 0 { + if sh != nil || len(binlogs) != 0 { payInfo = &payloadInfo{} defer payInfo.free() } @@ -1379,7 +1402,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } - for _, sh := range shs { + if sh != nil { sh.HandleRPC(ctx, &stats.InPayload{ RecvTime: time.Now(), Payload: v, @@ -1553,32 +1576,30 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv if channelz.IsOn() { s.incrCallsStarted() } - shs := s.opts.statsHandlers + sh := s.statsHandler var statsBegin *stats.Begin - if len(shs) != 0 { - beginTime := time.Now() + if sh != nil { statsBegin = &stats.Begin{ - BeginTime: beginTime, + BeginTime: time.Now(), IsClientStream: sd.ClientStreams, IsServerStream: sd.ServerStreams, } - for _, sh := range shs { - sh.HandleRPC(ctx, statsBegin) - } + sh.HandleRPC(ctx, statsBegin) } ctx = NewContextWithServerTransportStream(ctx, stream) ss := &serverStream{ ctx: ctx, s: stream, - p: &parser{r: stream, bufferPool: s.opts.bufferPool}, + p: parser{r: stream, bufferPool: s.opts.bufferPool}, codec: s.getCodec(stream.ContentSubtype()), + desc: sd, maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, trInfo: trInfo, - statsHandler: shs, + statsHandler: sh, } - if len(shs) != 0 || trInfo != nil || channelz.IsOn() { + if sh != nil || trInfo != nil || channelz.IsOn() { // See comment in processUnaryRPC on defers. 
defer func() { if trInfo != nil { @@ -1592,7 +1613,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv ss.mu.Unlock() } - if len(shs) != 0 { + if sh != nil { end := &stats.End{ BeginTime: statsBegin.BeginTime, EndTime: time.Now(), @@ -1600,9 +1621,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv if err != nil && err != io.EOF { end.Error = toRPCErr(err) } - for _, sh := range shs { - sh.HandleRPC(ctx, end) - } + sh.HandleRPC(ctx, end) } if channelz.IsOn() { @@ -1791,19 +1810,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Ser method := sm[pos+1:] // FromIncomingContext is expensive: skip if there are no statsHandlers - if len(s.opts.statsHandlers) > 0 { + if s.statsHandler != nil { md, _ := metadata.FromIncomingContext(ctx) - for _, sh := range s.opts.statsHandlers { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) - sh.HandleRPC(ctx, &stats.InHeader{ - FullMethod: stream.Method(), - RemoteAddr: t.Peer().Addr, - LocalAddr: t.Peer().LocalAddr, - Compression: stream.RecvCompress(), - WireLength: stream.HeaderWireLength(), - Header: md, - }) - } + ctx = s.statsHandler.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) + s.statsHandler.HandleRPC(ctx, &stats.InHeader{ + FullMethod: stream.Method(), + RemoteAddr: t.Peer().Addr, + LocalAddr: t.Peer().LocalAddr, + Compression: stream.RecvCompress(), + WireLength: stream.HeaderWireLength(), + Header: md, + }) } // To have calls in stream callouts work. Will delete once all stats handler // calls come from the gRPC layer. diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go index dc03731e4..67194a592 100644 --- a/vendor/google.golang.org/grpc/stats/handlers.go +++ b/vendor/google.golang.org/grpc/stats/handlers.go @@ -38,6 +38,15 @@ type RPCTagInfo struct { // FailFast indicates if this RPC is failfast. // This field is only valid on client side, it's always false on server side. FailFast bool + // NameResolutionDelay indicates if the RPC needed to wait for the + // initial name resolver update before it could begin. This should only + // happen if the channel is IDLE when the RPC is started. Note that + // all retry or hedging attempts for an RPC that experienced a delay + // will have it set. + // + // This field is only valid on the client side; it is always false on + // the server side. + NameResolutionDelay bool } // Handler defines the interface for the related stats handling (e.g., RPCs, connections). diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index baf7740ef..10bf998aa 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -64,15 +64,21 @@ func (s *Begin) IsClient() bool { return s.Client } func (s *Begin) isRPCStats() {} -// PickerUpdated indicates that the LB policy provided a new picker while the -// RPC was waiting for one. -type PickerUpdated struct{} +// DelayedPickComplete indicates that the RPC is unblocked following a delay in +// selecting a connection for the call. +type DelayedPickComplete struct{} -// IsClient indicates if the stats information is from client side. Only Client -// Side interfaces with a Picker, thus always returns true. -func (*PickerUpdated) IsClient() bool { return true } +// IsClient indicates DelayedPickComplete is available on the client. 
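The new RPCTagInfo.NameResolutionDelay field and the DelayedPickComplete event (whose methods follow) let a client-side stats handler observe RPCs that stalled waiting for the first resolver update or for a connection pick. A sketch showing only the two relevant methods; delayLogger is an illustrative type, and its TagConn/HandleConn methods are omitted for brevity:

func (h *delayLogger) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
	if info.NameResolutionDelay {
		log.Printf("%s waited for the initial resolver update", info.FullMethodName)
	}
	return ctx
}

func (h *delayLogger) HandleRPC(ctx context.Context, s stats.RPCStats) {
	if _, ok := s.(*stats.DelayedPickComplete); ok {
		log.Print("RPC unblocked after a delayed connection pick")
	}
}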
+func (*DelayedPickComplete) IsClient() bool { return true } -func (*PickerUpdated) isRPCStats() {} +func (*DelayedPickComplete) isRPCStats() {} + +// PickerUpdated indicates that the RPC is unblocked following a delay in +// selecting a connection for the call. +// +// Deprecated: will be removed in a future release; use DelayedPickComplete +// instead. +type PickerUpdated = DelayedPickComplete // InPayload contains stats about an incoming payload. type InPayload struct { diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 12163150b..ca87ff977 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -101,9 +101,9 @@ type ClientStream interface { // It must only be called after stream.CloseAndRecv has returned, or // stream.Recv has returned a non-nil error (including io.EOF). Trailer() metadata.MD - // CloseSend closes the send direction of the stream. It closes the stream - // when non-nil error is met. It is also not safe to call CloseSend - // concurrently with SendMsg. + // CloseSend closes the send direction of the stream. This method always + // returns a nil error. The status of the stream may be discovered using + // RecvMsg. It is also not safe to call CloseSend concurrently with SendMsg. CloseSend() error // Context returns the context for this stream. // @@ -177,6 +177,8 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth return cc.NewStream(ctx, desc, method, opts...) } +var emptyMethodConfig = serviceconfig.MethodConfig{} + func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { // Start tracking the RPC for idleness purposes. This is where a stream is // created for both streaming and unary RPCs, and hence is a good place to @@ -212,14 +214,15 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } // Provide an opportunity for the first RPC to see the first service config // provided by the resolver. - if err := cc.waitForResolvedAddrs(ctx); err != nil { + nameResolutionDelayed, err := cc.waitForResolvedAddrs(ctx) + if err != nil { return nil, err } - var mc serviceconfig.MethodConfig + mc := &emptyMethodConfig var onCommit func() newStream := func(ctx context.Context, done func()) (iresolver.ClientStream, error) { - return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...) + return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, nameResolutionDelayed, opts...) 
} rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method} @@ -239,7 +242,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth if rpcConfig.Context != nil { ctx = rpcConfig.Context } - mc = rpcConfig.MethodConfig + mc = &rpcConfig.MethodConfig onCommit = rpcConfig.OnCommitted if rpcConfig.Interceptor != nil { rpcInfo.Context = nil @@ -257,7 +260,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth return newStream(ctx, func() {}) } -func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) { +func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc *serviceconfig.MethodConfig, onCommit, doneFunc func(), nameResolutionDelayed bool, opts ...CallOption) (_ iresolver.ClientStream, err error) { callInfo := defaultCallInfo() if mc.WaitForReady != nil { callInfo.failFast = !*mc.WaitForReady @@ -296,6 +299,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client Method: method, ContentSubtype: callInfo.contentSubtype, DoneFunc: doneFunc, + Authority: callInfo.authority, } // Set our outgoing compression according to the UseCompressor CallOption, if @@ -321,19 +325,20 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client } cs := &clientStream{ - callHdr: callHdr, - ctx: ctx, - methodConfig: &mc, - opts: opts, - callInfo: callInfo, - cc: cc, - desc: desc, - codec: callInfo.codec, - compressorV0: compressorV0, - compressorV1: compressorV1, - cancel: cancel, - firstAttempt: true, - onCommit: onCommit, + callHdr: callHdr, + ctx: ctx, + methodConfig: mc, + opts: opts, + callInfo: callInfo, + cc: cc, + desc: desc, + codec: callInfo.codec, + compressorV0: compressorV0, + compressorV1: compressorV1, + cancel: cancel, + firstAttempt: true, + onCommit: onCommit, + nameResolutionDelay: nameResolutionDelayed, } if !cc.dopts.disableRetry { cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) @@ -415,19 +420,21 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.compressorV0, cs.compressorV1) method := cs.callHdr.Method var beginTime time.Time - shs := cs.cc.dopts.copts.StatsHandlers - for _, sh := range shs { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast}) + sh := cs.cc.statsHandler + if sh != nil { beginTime = time.Now() - begin := &stats.Begin{ + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{ + FullMethodName: method, FailFast: cs.callInfo.failFast, + NameResolutionDelay: cs.nameResolutionDelay, + }) + sh.HandleRPC(ctx, &stats.Begin{ Client: true, BeginTime: beginTime, FailFast: cs.callInfo.failFast, IsClientStream: cs.desc.ClientStreams, IsServerStream: cs.desc.ServerStreams, IsTransparentRetryAttempt: isTransparent, - } - sh.HandleRPC(ctx, begin) + }) } var trInfo *traceInfo @@ -458,7 +465,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) beginTime: beginTime, cs: cs, decompressorV0: cs.cc.dopts.dc, - statsHandlers: shs, + statsHandler: sh, trInfo: trInfo, }, nil } @@ -466,8 +473,9 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) func (a *csAttempt) getTransport() error { cs := a.cs - var err error - a.transport, a.pickResult, err = 
cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) + pickInfo := balancer.PickInfo{Ctx: a.ctx, FullMethodName: cs.callHdr.Method} + pick, err := cs.cc.pickerWrapper.pick(a.ctx, cs.callInfo.failFast, pickInfo) + a.transport, a.pickResult = pick.transport, pick.result if err != nil { if de, ok := err.(dropError); ok { err = de.error @@ -478,6 +486,9 @@ func (a *csAttempt) getTransport() error { if a.trInfo != nil { a.trInfo.firstLine.SetRemoteAddr(a.transport.RemoteAddr()) } + if pick.blocked && a.statsHandler != nil { + a.statsHandler.HandleRPC(a.ctx, &stats.DelayedPickComplete{}) + } return nil } @@ -520,7 +531,7 @@ func (a *csAttempt) newStream() error { } a.transportStream = s a.ctx = s.Context() - a.parser = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool} + a.parser = parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool} return nil } @@ -540,6 +551,8 @@ type clientStream struct { sentLast bool // sent an end stream + receivedFirstMsg bool // set after the first message is received + methodConfig *MethodConfig ctx context.Context // the application's context, wrapped by stats/tracing @@ -573,6 +586,9 @@ type clientStream struct { onCommit func() replayBuffer []replayOp // operations to replay on retry replayBufferSize int // current size of replayBuffer + // nameResolutionDelay indicates if there was a delay in the name resolution. + // This field is only valid on client side, it's always false on server side. + nameResolutionDelay bool } type replayOp struct { @@ -587,7 +603,7 @@ type csAttempt struct { cs *clientStream transport transport.ClientTransport transportStream *transport.ClientStream - parser *parser + parser parser pickResult balancer.PickResult finished bool @@ -601,8 +617,8 @@ type csAttempt struct { // and cleared when the finish method is called. trInfo *traceInfo - statsHandlers []stats.Handler - beginTime time.Time + statsHandler stats.Handler + beginTime time.Time // set for newStream errors that may be transparently retried allowTransparentRetry bool @@ -987,7 +1003,7 @@ func (cs *clientStream) RecvMsg(m any) error { func (cs *clientStream) CloseSend() error { if cs.sentLast { - // TODO: return an error and finish the stream instead, due to API misuse? + // Return a nil error on repeated calls to this method. return nil } cs.sentLast = true @@ -1008,7 +1024,10 @@ func (cs *clientStream) CloseSend() error { binlog.Log(cs.ctx, chc) } } - // We never returned an error here for reasons. + // We don't return an error here as we expect users to read all messages + // from the stream and get the RPC status from RecvMsg(). Note that + // SendMsg() must return an error when one occurs so the application + // knows to stop sending messages, but that does not apply here. 
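The clarified contract above, where CloseSend always returns nil and the RPC status surfaces from RecvMsg, matches the usual client-streaming pattern. A hedged sketch against the generic grpc.ClientStream surface; the Upload stub and pb.UploadSummary type are assumed stand-ins:

stream, err := client.Upload(ctx) // assumed client-streaming stub
if err != nil {
	return err
}
for _, c := range chunks {
	if err := stream.Send(c); err != nil {
		break // SendMsg reports send-side failures; stop sending
	}
}
_ = stream.CloseSend() // always nil by contract
var summary pb.UploadSummary
if err := stream.RecvMsg(&summary); err != nil {
	return err // the real RPC status arrives here
}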
return nil } @@ -1093,17 +1112,15 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength } return io.EOF } - if len(a.statsHandlers) != 0 { - for _, sh := range a.statsHandlers { - sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now())) - } + if a.statsHandler != nil { + a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now())) } return nil } func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { cs := a.cs - if len(a.statsHandlers) != 0 && payInfo == nil { + if a.statsHandler != nil && payInfo == nil { payInfo = &payloadInfo{} defer payInfo.free() } @@ -1124,16 +1141,21 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { // Only initialize this state once per stream. a.decompressorSet = true } - if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decompressorV1, false); err != nil { + if err := recv(&a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decompressorV1, false); err != nil { if err == io.EOF { if statusErr := a.transportStream.Status().Err(); statusErr != nil { return statusErr } + // Received no msg and status OK for non-server streaming rpcs. + if !cs.desc.ServerStreams && !cs.receivedFirstMsg { + return status.Error(codes.Internal, "cardinality violation: received no response message from non-server-streaming RPC") + } return io.EOF // indicates successful end of stream. } return toRPCErr(err) } + cs.receivedFirstMsg = true if a.trInfo != nil { a.mu.Lock() if a.trInfo.tr != nil { @@ -1141,8 +1163,8 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { } a.mu.Unlock() } - for _, sh := range a.statsHandlers { - sh.HandleRPC(a.ctx, &stats.InPayload{ + if a.statsHandler != nil { + a.statsHandler.HandleRPC(a.ctx, &stats.InPayload{ Client: true, RecvTime: time.Now(), Payload: m, @@ -1157,12 +1179,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { } // Special handling for non-server-stream rpcs. // This recv expects EOF or errors, so we don't collect inPayload. 
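parser is now held by value (in csAttempt here, and in addrConnStream and serverStream below) and passed as &a.parser, with the noCopy marker added earlier guarding against accidental copies: because noCopy has Lock/Unlock methods, go vet's copylocks check flags any by-value copy of a struct containing it. The idiom in isolation, as a self-contained sketch:

type noCopy struct{}

func (*noCopy) Lock()   {}
func (*noCopy) Unlock() {}

type buffered struct {
	_   noCopy
	buf []byte
}

func use(b buffered) {} // go vet (copylocks): "use passes lock by value: buffered contains noCopy"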
- if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decompressorV1, false); err == io.EOF { + if err := recv(&a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decompressorV1, false); err == io.EOF { return a.transportStream.Status().Err() // non-server streaming Recv returns nil on success } else if err != nil { return toRPCErr(err) } - return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>")) + return status.Error(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message") } func (a *csAttempt) finish(err error) { @@ -1195,15 +1217,14 @@ func (a *csAttempt) finish(err error) { ServerLoad: balancerload.Parse(tr), }) } - for _, sh := range a.statsHandlers { - end := &stats.End{ + if a.statsHandler != nil { + a.statsHandler.HandleRPC(a.ctx, &stats.End{ Client: true, BeginTime: a.beginTime, EndTime: time.Now(), Trailer: tr, Error: err, - } - sh.HandleRPC(a.ctx, end) + }) } if a.trInfo != nil && a.trInfo.tr != nil { if err == nil { @@ -1309,7 +1330,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin return nil, err } as.transportStream = s - as.parser = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool} + as.parser = parser{r: s, bufferPool: ac.dopts.copts.BufferPool} ac.incrCallsStarted() if desc != unaryStreamDesc { // Listen on stream context to cleanup when the stream context is @@ -1344,6 +1365,7 @@ type addrConnStream struct { transport transport.ClientTransport ctx context.Context sentLast bool + receivedFirstMsg bool desc *StreamDesc codec baseCodec sendCompressorV0 Compressor @@ -1351,7 +1373,7 @@ type addrConnStream struct { decompressorSet bool decompressorV0 Decompressor decompressorV1 encoding.Compressor - parser *parser + parser parser // mu guards finished and is held for the entire finish method. mu sync.Mutex @@ -1372,7 +1394,7 @@ func (as *addrConnStream) Trailer() metadata.MD { func (as *addrConnStream) CloseSend() error { if as.sentLast { - // TODO: return an error and finish the stream instead, due to API misuse? + // Return a nil error on repeated calls to this method. return nil } as.sentLast = true @@ -1464,15 +1486,20 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { // Only initialize this state once per stream. as.decompressorSet = true } - if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err != nil { + if err := recv(&as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err != nil { if err == io.EOF { if statusErr := as.transportStream.Status().Err(); statusErr != nil { return statusErr } + // Received no msg and status OK for non-server streaming rpcs. + if !as.desc.ServerStreams && !as.receivedFirstMsg { + return status.Error(codes.Internal, "cardinality violation: received no response message from non-server-streaming RPC") + } return io.EOF // indicates successful end of stream. } return toRPCErr(err) } + as.receivedFirstMsg = true if as.desc.ServerStreams { // Subsequent messages should be received by subsequent RecvMsg calls. @@ -1481,12 +1508,12 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { // Special handling for non-server-stream rpcs. // This recv expects EOF or errors, so we don't collect inPayload.
- if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err == io.EOF { + if err := recv(&as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err == io.EOF { return as.transportStream.Status().Err() // non-server streaming Recv returns nil on success } else if err != nil { return toRPCErr(err) } - return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>")) + return status.Error(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message") } func (as *addrConnStream) finish(err error) { @@ -1569,8 +1596,9 @@ type ServerStream interface { type serverStream struct { ctx context.Context s *transport.ServerStream - p *parser + p parser codec baseCodec + desc *StreamDesc compressorV0 Compressor compressorV1 encoding.Compressor @@ -1579,11 +1607,13 @@ type serverStream struct { sendCompressorName string + recvFirstMsg bool // set after the first message is received + maxReceiveMessageSize int maxSendMessageSize int trInfo *traceInfo - statsHandler []stats.Handler + statsHandler stats.Handler binlogs []binarylog.MethodLogger // serverHeaderBinlogged indicates whether server header has been logged. It @@ -1719,10 +1749,8 @@ func (ss *serverStream) SendMsg(m any) (err error) { binlog.Log(ss.ctx, sm) } } - if len(ss.statsHandler) != 0 { - for _, sh := range ss.statsHandler { - sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now())) - } + if ss.statsHandler != nil { + ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now())) } return nil } @@ -1753,11 +1781,11 @@ func (ss *serverStream) RecvMsg(m any) (err error) { } }() var payInfo *payloadInfo - if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { + if ss.statsHandler != nil || len(ss.binlogs) != 0 { payInfo = &payloadInfo{} defer payInfo.free() } - if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, payInfo, ss.decompressorV1, true); err != nil { + if err := recv(&ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, payInfo, ss.decompressorV1, true); err != nil { if err == io.EOF { if len(ss.binlogs) != 0 { chc := &binarylog.ClientHalfClose{} @@ -1765,6 +1793,10 @@ func (ss *serverStream) RecvMsg(m any) (err error) { binlog.Log(ss.ctx, chc) } } + // Received no request msg for non-client streaming rpcs.
+ if !ss.desc.ClientStreams && !ss.recvFirstMsg { + return status.Error(codes.Internal, "cardinality violation: received no request message from non-client-streaming RPC") + } return err } if err == io.ErrUnexpectedEOF { @@ -1772,16 +1804,15 @@ func (ss *serverStream) RecvMsg(m any) (err error) { } return toRPCErr(err) } - if len(ss.statsHandler) != 0 { - for _, sh := range ss.statsHandler { - sh.HandleRPC(ss.s.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: m, - Length: payInfo.uncompressedBytes.Len(), - WireLength: payInfo.compressedLength + headerLen, - CompressedLength: payInfo.compressedLength, - }) - } + ss.recvFirstMsg = true + if ss.statsHandler != nil { + ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: m, + Length: payInfo.uncompressedBytes.Len(), + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + }) } if len(ss.binlogs) != 0 { cm := &binarylog.ClientMessage{ @@ -1791,7 +1822,19 @@ func (ss *serverStream) RecvMsg(m any) (err error) { binlog.Log(ss.ctx, cm) } } - return nil + + if ss.desc.ClientStreams { + // Subsequent messages should be received by subsequent RecvMsg calls. + return nil + } + // Special handling for non-client-stream rpcs. + // This recv expects EOF or errors, so we don't collect inPayload. + if err := recv(&ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, nil, ss.decompressorV1, true); err == io.EOF { + return nil + } else if err != nil { + return err + } + return status.Error(codes.Internal, "cardinality violation: received multiple request messages for non-client-streaming RPC") } // MethodFromServerStream returns the method string for the input stream. diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 2bae4db89..9e6d018fb 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
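With the client- and server-side checks above, a non-streaming direction that sees zero or multiple messages now fails fast with a codes.Internal status whose message begins "cardinality violation". A hedged sketch of detecting that class of failure at a call site; the Echo stub is illustrative (assumes imports of strings, log, google.golang.org/grpc/codes, and google.golang.org/grpc/status):

resp, err := client.Echo(ctx, req) // assumed unary stub
if err != nil {
	if st, ok := status.FromError(err); ok && st.Code() == codes.Internal &&
		strings.Contains(st.Message(), "cardinality violation") {
		log.Printf("peer violated message cardinality: %v", err)
	}
	return err
}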
-const Version = "1.72.2" +const Version = "1.77.0" diff --git a/vendor/modules.txt b/vendor/modules.txt index f6d38d69c..5dc6d32ab 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -93,11 +93,9 @@ github.com/josharian/intern # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go -# github.com/kubernetes-csi/external-snapshotter/client/v7 v7.0.0 -## explicit; go 1.21 -github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1 # github.com/kubernetes-csi/external-snapshotter/client/v8 v8.4.0 ## explicit; go 1.22.0 +github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1 github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1 # github.com/mailru/easyjson v0.9.0 ## explicit; go 1.20 @@ -201,8 +199,10 @@ github.com/spf13/cobra # github.com/spf13/pflag v1.0.10 ## explicit; go 1.12 github.com/spf13/pflag -# github.com/vmware-tanzu/velero v1.14.0 => github.com/openshift/velero v0.10.2-0.20250514165055-8fbcf3a8da11 -## explicit; go 1.23.0 +# github.com/vmware-tanzu/velero v1.14.0 => github.com/openshift/velero v0.10.2-0.20260323170432-5ef912f438f6 +## explicit; go 1.25.0 +github.com/vmware-tanzu/velero/internal/credentials +github.com/vmware-tanzu/velero/internal/resourcepolicies github.com/vmware-tanzu/velero/pkg/apis/velero/shared github.com/vmware-tanzu/velero/pkg/apis/velero/v1 github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1 @@ -240,6 +240,7 @@ github.com/vmware-tanzu/velero/pkg/util/filesystem github.com/vmware-tanzu/velero/pkg/util/kube github.com/vmware-tanzu/velero/pkg/util/logging github.com/vmware-tanzu/velero/pkg/util/results +github.com/vmware-tanzu/velero/pkg/util/wildcard github.com/vmware-tanzu/velero/third_party/kubernetes/pkg/kubectl/cmd/util # github.com/x448/float16 v0.8.4 ## explicit; go 1.11 @@ -252,7 +253,6 @@ go.yaml.in/yaml/v2 go.yaml.in/yaml/v3 # golang.org/x/net v0.52.0 ## explicit; go 1.25.0 -golang.org/x/net/context golang.org/x/net/html golang.org/x/net/html/atom golang.org/x/net/html/charset @@ -307,11 +307,11 @@ golang.org/x/time/rate # gomodules.xyz/jsonpatch/v2 v2.5.0 ## explicit; go 1.20 gomodules.xyz/jsonpatch/v2 -# google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a -## explicit; go 1.23.0 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 +## explicit; go 1.24.0 google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.72.2 -## explicit; go 1.23 +# google.golang.org/grpc v1.77.0 +## explicit; go 1.24.0 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -321,7 +321,6 @@ google.golang.org/grpc/balancer/endpointsharding google.golang.org/grpc/balancer/grpclb/state google.golang.org/grpc/balancer/pickfirst google.golang.org/grpc/balancer/pickfirst/internal -google.golang.org/grpc/balancer/pickfirst/pickfirstleaf google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/binarylog/grpc_binarylog_v1 google.golang.org/grpc/channelz @@ -330,6 +329,7 @@ google.golang.org/grpc/connectivity google.golang.org/grpc/credentials google.golang.org/grpc/credentials/insecure google.golang.org/grpc/encoding +google.golang.org/grpc/encoding/internal google.golang.org/grpc/encoding/proto google.golang.org/grpc/experimental/stats google.golang.org/grpc/grpclog @@ -940,4 +940,4 @@ sigs.k8s.io/structured-merge-diff/v6/value # sigs.k8s.io/yaml v1.6.0 ## explicit; go 1.22 sigs.k8s.io/yaml -# github.com/vmware-tanzu/velero => 
github.com/openshift/velero v0.10.2-0.20250514165055-8fbcf3a8da11 +# github.com/vmware-tanzu/velero => github.com/openshift/velero v0.10.2-0.20260323170432-5ef912f438f6